be2net: Enable SR-IOV for Lancer
drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

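/*
 * A usage sketch for the parameters above, assuming the module is
 * loaded the usual way:
 *
 *	modprobe be2net num_vfs=4 rx_frag_size=4096
 *
 * num_vfs is honored only on the PF, which brings the VFs up (via
 * pci_enable_sriov()) at probe time.
 */
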
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
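
/*
 * A minimal pairing sketch for the two helpers above (the queue length
 * and entry type are illustrative only):
 *
 *	struct be_queue_info q;
 *
 *	if (be_queue_alloc(adapter, &q, 256, sizeof(struct be_eq_entry)))
 *		return -1;
 *	...
 *	be_queue_free(adapter, &q);
 */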

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}
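
/*
 * Doorbell helpers: each notify below packs a ring id (low bits) and a
 * produced/consumed count into a single 32-bit write to the doorbell
 * BAR. The wmb() in the rxq/txq variants orders descriptor updates in
 * memory before the doorbell write, so the HW never sees a ring that is
 * ahead of what was actually written.
 */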

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	drvs->eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	drvs->eth_red_drops = pmem_sts->eth_red_drops;
}
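
/*
 * Lancer reports each pport counter as a lo/hi pair of 32-bit words;
 * make_64bit_val(hi, lo) folds a pair into one u64 (assuming the usual
 * ((u64)hi << 32 | lo) definition). Counters Lancer does not expose are
 * pinned to 0 below.
 */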
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_cmd_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames =
		make_64bit_val(pport_stats->rx_pause_frames_hi,
			       pport_stats->rx_pause_frames_lo);
	drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
					     pport_stats->rx_crc_errors_lo);
	drvs->rx_control_frames =
		make_64bit_val(pport_stats->rx_control_frames_hi,
			       pport_stats->rx_control_frames_lo);
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long =
		make_64bit_val(pport_stats->rx_frames_too_long_hi,
			       pport_stats->rx_frames_too_long_lo);
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		make_64bit_val(pport_stats->rx_symbol_errors_hi,
			       pport_stats->rx_symbol_errors_lo);
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
					      pport_stats->tx_pause_frames_lo);
	drvs->tx_controlframes =
		make_64bit_val(pport_stats->tx_control_frames_hi,
			       pport_stats->tx_control_frames_lo);
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_no_pbuf = 0;
	drvs->rx_drops_no_txpb = 0;
	drvs->rx_drops_no_erx_descr = 0;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
						 pport_stats->num_forwards_lo);
	drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
					    pport_stats->rx_drops_mtu_lo);
	drvs->rx_drops_no_tpre_descr = 0;
	drvs->rx_drops_too_many_frags =
		make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
			       pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			if (!(lancer_chip(adapter))) {
				struct be_erx_stats_v1 *erx_stats =
					be_erx_stats_from_cmd(adapter);
				dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
			}
		} else {
			struct be_erx_stats_v0 *erx_stats =
				be_erx_stats_from_cmd(adapter);
			dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
		}
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}
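
/*
 * Worked example for be_calc_rate(): bytes = 2500000000 over
 * ticks = 2 * HZ gives 1.25e9 bytes/sec; << 3 makes 1e10 bits/sec and
 * the final divide yields 10000 Mbits/sec, i.e. a 10G link at line
 * rate.
 */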

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
		bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
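
/*
 * Example: a non-GSO skb with a linear area and two page frags needs
 * 1 (linear) + 2 (frags) + 1 (hdr wrb) = 4 WRBs; 4 is even, so no dummy
 * WRB is added. With one frag the count would be 3 and, on non-Lancer
 * chips, a dummy WRB pads it to an even 4.
 */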

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
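
/*
 * On a DMA mapping failure, make_tx_wrbs() rewinds txq->head to
 * map_head and unmaps every WRB it had already filled; only the first
 * WRB covers a dma_map_single()'d linear area, hence map_single is
 * cleared after the first iteration of the unwind loop.
 */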

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
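
/*
 * The ndo VF callbacks above and below run on the PF; the "vf + 1"
 * argument in the FW commands selects the VF's function domain. They
 * return -EPERM when SR-IOV is off and are typically exercised from
 * userspace with, e.g., "ip link set <if> vf 0 mac <addr>".
 */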

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				      now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}
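
/*
 * csum_passed() gates CHECKSUM_UNNECESSARY in the completion handlers
 * below: only TCP/UDP frames with a good L4 checksum qualify, and for
 * IPv6 the (non-existent) IP header checksum bit is ignored; all other
 * frames fall back to skb_checksum_none_assert().
 */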

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
			       rxcp->vlan_tag);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
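
/*
 * Buffer-posting scheme used above: one compound "big page"
 * (adapter->big_page_size) is sliced into rx_frag_size chunks, each
 * posted as a separate rx descriptor holding a get_page() reference.
 * The slot that takes the last chunk is marked last_page_user, which
 * tells get_rx_page_info() when to dma_unmap_page() the whole page.
 */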

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
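
/*
 * be_rx_compl_get() and be_tx_compl_get() share one protocol: peek the
 * valid bit, rmb() so the rest of the entry is read only after the
 * valid bit is seen set, then clear the valid dword so a wrapped-around
 * queue never re-processes a stale completion.
 */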

static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
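
/*
 * Note that event_handle() re-arms the EQ even when num == 0, so that
 * spurious interrupts (no events posted yet) are still acked;
 * be_eq_clean() below notifies with rearm == false since its queues are
 * about to be destroyed.
 */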

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1613
5fb379ee
SP
1614static void be_mcc_queues_destroy(struct be_adapter *adapter)
1615{
1616 struct be_queue_info *q;
5fb379ee 1617
8788fdc2 1618 q = &adapter->mcc_obj.q;
5fb379ee 1619 if (q->created)
8788fdc2 1620 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1621 be_queue_free(adapter, q);
1622
8788fdc2 1623 q = &adapter->mcc_obj.cq;
5fb379ee 1624 if (q->created)
8788fdc2 1625 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1626 be_queue_free(adapter, q);
1627}
1628
1629/* Must be called only after TX qs are created as MCC shares TX EQ */
1630static int be_mcc_queues_create(struct be_adapter *adapter)
1631{
1632 struct be_queue_info *q, *cq;
5fb379ee
SP
1633
1634 /* Alloc MCC compl queue */
8788fdc2 1635 cq = &adapter->mcc_obj.cq;
5fb379ee 1636 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1637 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1638 goto err;
1639
1640 /* Ask BE to create MCC compl queue; share TX's eq */
8788fdc2 1641 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
5fb379ee
SP
1642 goto mcc_cq_free;
1643
1644 /* Alloc MCC queue */
8788fdc2 1645 q = &adapter->mcc_obj.q;
5fb379ee
SP
1646 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1647 goto mcc_cq_destroy;
1648
1649 /* Ask BE to create MCC queue */
8788fdc2 1650 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1651 goto mcc_q_free;
1652
1653 return 0;
1654
1655mcc_q_free:
1656 be_queue_free(adapter, q);
1657mcc_cq_destroy:
8788fdc2 1658 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1659mcc_cq_free:
1660 be_queue_free(adapter, cq);
1661err:
1662 return -1;
1663}
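
The create path above uses the kernel's usual goto-unwind idiom: each failure jumps to a label that releases exactly the resources acquired so far, in reverse order. A self-contained sketch of the idiom (the alloc/free stubs are hypothetical):

        /* goto-unwind sketch; the alloc/free stubs are invented. */
        #include <stdio.h>

        static int alloc_cq(void) { return 0; }   /* succeeds */
        static int alloc_q(void) { return -1; }   /* fails, for the demo */
        static void free_cq(void) { puts("undoing cq"); }

        static int create_queues(void)
        {
                if (alloc_cq())
                        goto err;
                if (alloc_q())
                        goto cq_free;   /* undo only what succeeded */
                return 0;

        cq_free:
                free_cq();
        err:
                return -1;
        }

        int main(void)
        {
                printf("create_queues: %d\n", create_queues());
                return 0;
        }
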
1664
6b7c5b94
SP
1665static void be_tx_queues_destroy(struct be_adapter *adapter)
1666{
1667 struct be_queue_info *q;
1668
1669 q = &adapter->tx_obj.q;
a8e9179a 1670 if (q->created)
8788fdc2 1671 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
6b7c5b94
SP
1672 be_queue_free(adapter, q);
1673
1674 q = &adapter->tx_obj.cq;
1675 if (q->created)
8788fdc2 1676 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
6b7c5b94
SP
1677 be_queue_free(adapter, q);
1678
859b1e4e
SP
1679 /* Clear any residual events */
1680 be_eq_clean(adapter, &adapter->tx_eq);
1681
6b7c5b94
SP
1682 q = &adapter->tx_eq.q;
1683 if (q->created)
8788fdc2 1684 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
6b7c5b94
SP
1685 be_queue_free(adapter, q);
1686}
1687
1688static int be_tx_queues_create(struct be_adapter *adapter)
1689{
1690 struct be_queue_info *eq, *q, *cq;
1691
1692 adapter->tx_eq.max_eqd = 0;
1693 adapter->tx_eq.min_eqd = 0;
1694 adapter->tx_eq.cur_eqd = 96;
1695 adapter->tx_eq.enable_aic = false;
1696 /* Alloc Tx Event queue */
1697 eq = &adapter->tx_eq.q;
1698 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1699 return -1;
1700
1701 /* Ask BE to create Tx Event queue */
8788fdc2 1702 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
6b7c5b94 1703 goto tx_eq_free;
fe6d2a38 1704
ecd62107 1705 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1706
ba343c77 1707
6b7c5b94
SP
1708 /* Alloc TX eth compl queue */
1709 cq = &adapter->tx_obj.cq;
1710 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1711 sizeof(struct be_eth_tx_compl)))
1712 goto tx_eq_destroy;
1713
1714 /* Ask BE to create Tx eth compl queue */
8788fdc2 1715 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
6b7c5b94
SP
1716 goto tx_cq_free;
1717
1718 /* Alloc TX eth queue */
1719 q = &adapter->tx_obj.q;
1720 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1721 goto tx_cq_destroy;
1722
1723 /* Ask BE to create Tx eth queue */
8788fdc2 1724 if (be_cmd_txq_create(adapter, q, cq))
6b7c5b94
SP
1725 goto tx_q_free;
1726 return 0;
1727
1728tx_q_free:
1729 be_queue_free(adapter, q);
1730tx_cq_destroy:
8788fdc2 1731 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
6b7c5b94
SP
1732tx_cq_free:
1733 be_queue_free(adapter, cq);
1734tx_eq_destroy:
8788fdc2 1735 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
6b7c5b94
SP
1736tx_eq_free:
1737 be_queue_free(adapter, eq);
1738 return -1;
1739}
1740
1741static void be_rx_queues_destroy(struct be_adapter *adapter)
1742{
1743 struct be_queue_info *q;
3abcdeda
SP
1744 struct be_rx_obj *rxo;
1745 int i;
1746
1747 for_all_rx_queues(adapter, rxo, i) {
1748 q = &rxo->q;
1749 if (q->created) {
1750 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1751 /* After the rxq is invalidated, wait for a grace time
1752			 * of 1ms for all DMA to end and the flush compl to
1753 * arrive
1754 */
1755 mdelay(1);
1756 be_rx_q_clean(adapter, rxo);
1757 }
1758 be_queue_free(adapter, q);
1759
1760 q = &rxo->cq;
1761 if (q->created)
1762 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1763 be_queue_free(adapter, q);
1764
1765 /* Clear any residual events */
1766 q = &rxo->rx_eq.q;
1767 if (q->created) {
1768 be_eq_clean(adapter, &rxo->rx_eq);
1769 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1770 }
1771 be_queue_free(adapter, q);
6b7c5b94 1772 }
6b7c5b94
SP
1773}
1774
ac6a0c4a
SP
1775static u32 be_num_rxqs_want(struct be_adapter *adapter)
1776{
1777 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1778 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1779 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1780 } else {
1781 dev_warn(&adapter->pdev->dev,
1782 "No support for multiple RX queues\n");
1783 return 1;
1784 }
1785}
1786
6b7c5b94
SP
1787static int be_rx_queues_create(struct be_adapter *adapter)
1788{
1789 struct be_queue_info *eq, *q, *cq;
3abcdeda
SP
1790 struct be_rx_obj *rxo;
1791 int rc, i;
6b7c5b94 1792
ac6a0c4a
SP
1793 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1794 msix_enabled(adapter) ?
1795 adapter->num_msix_vec - 1 : 1);
1796 if (adapter->num_rx_qs != MAX_RX_QS)
1797 dev_warn(&adapter->pdev->dev,
1798 "Can create only %d RX queues", adapter->num_rx_qs);
1799
6b7c5b94 1800 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
1801 for_all_rx_queues(adapter, rxo, i) {
1802 rxo->adapter = adapter;
1803 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1804 rxo->rx_eq.enable_aic = true;
1805
1806 /* EQ */
1807 eq = &rxo->rx_eq.q;
1808 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1809 sizeof(struct be_eq_entry));
1810 if (rc)
1811 goto err;
1812
1813 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1814 if (rc)
1815 goto err;
1816
ecd62107 1817 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1818
3abcdeda
SP
1819 /* CQ */
1820 cq = &rxo->cq;
1821 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1822 sizeof(struct be_eth_rx_compl));
1823 if (rc)
1824 goto err;
1825
1826 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1827 if (rc)
1828 goto err;
3abcdeda
SP
1829 /* Rx Q */
1830 q = &rxo->q;
1831 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1832 sizeof(struct be_eth_rx_d));
1833 if (rc)
1834 goto err;
1835
1836 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1837 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1838 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1839 if (rc)
1840 goto err;
1841 }
1842
1843 if (be_multi_rxq(adapter)) {
1844 u8 rsstable[MAX_RSS_QS];
1845
1846 for_all_rss_queues(adapter, rxo, i)
1847 rsstable[i] = rxo->rss_id;
1848
1849 rc = be_cmd_rss_config(adapter, rsstable,
1850 adapter->num_rx_qs - 1);
1851 if (rc)
1852 goto err;
1853 }
6b7c5b94
SP
1854
1855 return 0;
3abcdeda
SP
1856err:
1857 be_rx_queues_destroy(adapter);
1858 return -1;
6b7c5b94 1859}
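
The RSS setup at the end gathers each RSS-capable queue's hardware rss_id into an indirection table, so a flow's hash selects one of those queues. A sketch of the table fill and lookup, with made-up ids (the real ids come back from be_cmd_rxq_create):

        /* RSS indirection sketch; the rss_id values are invented. */
        #include <stdio.h>

        #define NUM_RSS_QS 4

        int main(void)
        {
                unsigned char rss_id[NUM_RSS_QS] = { 7, 9, 11, 13 };
                unsigned char rsstable[NUM_RSS_QS];
                unsigned int hash = 0x5bd1e995; /* hash of some flow */
                int i;

                for (i = 0; i < NUM_RSS_QS; i++)
                        rsstable[i] = rss_id[i];

                /* hardware steers the flow to the queue owning this rss_id */
                printf("flow -> rss_id %u\n", rsstable[hash % NUM_RSS_QS]);
                return 0;
        }
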
6b7c5b94 1860
fe6d2a38 1861static bool event_peek(struct be_eq_obj *eq_obj)
b628bde2 1862{
fe6d2a38
SP
1863 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1864 if (!eqe->evt)
1865 return false;
1866 else
1867 return true;
b628bde2
SP
1868}
1869
6b7c5b94
SP
1870static irqreturn_t be_intx(int irq, void *dev)
1871{
1872 struct be_adapter *adapter = dev;
3abcdeda 1873 struct be_rx_obj *rxo;
fe6d2a38 1874 int isr, i, tx = 0 , rx = 0;
6b7c5b94 1875
fe6d2a38
SP
1876 if (lancer_chip(adapter)) {
1877 if (event_peek(&adapter->tx_eq))
1878 tx = event_handle(adapter, &adapter->tx_eq);
1879 for_all_rx_queues(adapter, rxo, i) {
1880 if (event_peek(&rxo->rx_eq))
1881 rx |= event_handle(adapter, &rxo->rx_eq);
1882 }
6b7c5b94 1883
fe6d2a38
SP
1884 if (!(tx || rx))
1885 return IRQ_NONE;
3abcdeda 1886
fe6d2a38
SP
1887 } else {
1888 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1889 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1890 if (!isr)
1891 return IRQ_NONE;
1892
ecd62107 1893 if ((1 << adapter->tx_eq.eq_idx & isr))
fe6d2a38
SP
1894 event_handle(adapter, &adapter->tx_eq);
1895
1896 for_all_rx_queues(adapter, rxo, i) {
ecd62107 1897 if ((1 << rxo->rx_eq.eq_idx & isr))
fe6d2a38
SP
1898 event_handle(adapter, &rxo->rx_eq);
1899 }
3abcdeda 1900 }
c001c213 1901
8788fdc2 1902 return IRQ_HANDLED;
6b7c5b94
SP
1903}
1904
1905static irqreturn_t be_msix_rx(int irq, void *dev)
1906{
3abcdeda
SP
1907 struct be_rx_obj *rxo = dev;
1908 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1909
3abcdeda 1910 event_handle(adapter, &rxo->rx_eq);
6b7c5b94
SP
1911
1912 return IRQ_HANDLED;
1913}
1914
5fb379ee 1915static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
6b7c5b94
SP
1916{
1917 struct be_adapter *adapter = dev;
1918
8788fdc2 1919 event_handle(adapter, &adapter->tx_eq);
6b7c5b94
SP
1920
1921 return IRQ_HANDLED;
1922}
1923
2e588f84 1924static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1925{
2e588f84 1926	return rxcp->tcpf && !rxcp->err;
6b7c5b94
SP
1927}
1928
49b05221 1929static int be_poll_rx(struct napi_struct *napi, int budget)
6b7c5b94
SP
1930{
1931 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
3abcdeda
SP
1932 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1933 struct be_adapter *adapter = rxo->adapter;
1934 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1935 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1936 u32 work_done;
1937
3abcdeda 1938 rxo->stats.rx_polls++;
6b7c5b94 1939 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1940 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
1941 if (!rxcp)
1942 break;
1943
e80d9da6 1944 /* Ignore flush completions */
009dd872 1945 if (rxcp->num_rcvd && rxcp->pkt_size) {
2e588f84 1946 if (do_gro(rxcp))
64642811
SP
1947 be_rx_compl_process_gro(adapter, rxo, rxcp);
1948 else
1949 be_rx_compl_process(adapter, rxo, rxcp);
009dd872
PR
1950 } else if (rxcp->pkt_size == 0) {
1951 be_rx_compl_discard(adapter, rxo, rxcp);
64642811 1952 }
009dd872 1953
2e588f84 1954 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
1955 }
1956
6b7c5b94 1957 /* Refill the queue */
3abcdeda 1958 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1829b086 1959 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94
SP
1960
1961 /* All consumed */
1962 if (work_done < budget) {
1963 napi_complete(napi);
8788fdc2 1964 be_cq_notify(adapter, rx_cq->id, true, work_done);
6b7c5b94
SP
1965 } else {
1966 /* More to be consumed; continue with interrupts disabled */
8788fdc2 1967 be_cq_notify(adapter, rx_cq->id, false, work_done);
6b7c5b94
SP
1968 }
1969 return work_done;
1970}
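
be_poll_rx follows the NAPI contract: consume at most 'budget' completions; if fewer were available, call napi_complete() and re-arm the CQ so the next packet raises an interrupt; otherwise leave interrupts off and stay in polling mode. A userspace sketch of that contract (the backlog and helpers are stand-ins):

        /* NAPI-budget sketch; backlog and get_completion() are stand-ins. */
        #include <stdbool.h>
        #include <stdio.h>

        static int backlog = 5;

        static bool get_completion(void)
        {
                if (backlog > 0) {
                        backlog--;
                        return true;
                }
                return false;
        }

        static int poll(int budget, bool *rearm)
        {
                int work_done;

                for (work_done = 0; work_done < budget; work_done++)
                        if (!get_completion())
                                break;

                /* fewer than budget: napi_complete() and re-arm the CQ */
                *rearm = work_done < budget;
                return work_done;
        }

        int main(void)
        {
                bool rearm;
                int done = poll(8, &rearm);

                printf("did %d, interrupts %s\n", done,
                       rearm ? "re-armed" : "off");
                return 0;
        }
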
1971
f31e50a8
SP
1972/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1973 * For TX/MCC we don't honour budget; consume everything
1974 */
1975static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
6b7c5b94 1976{
f31e50a8
SP
1977 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1978 struct be_adapter *adapter =
1979 container_of(tx_eq, struct be_adapter, tx_eq);
5fb379ee
SP
1980 struct be_queue_info *txq = &adapter->tx_obj.q;
1981 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
6b7c5b94 1982 struct be_eth_tx_compl *txcp;
f31e50a8 1983 int tx_compl = 0, mcc_compl, status = 0;
4d586b82 1984 u16 end_idx, num_wrbs = 0;
6b7c5b94 1985
5fb379ee 1986 while ((txcp = be_tx_compl_get(tx_cq))) {
6b7c5b94 1987 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
f31e50a8 1988 wrb_index, txcp);
4d586b82 1989 num_wrbs += be_tx_compl_process(adapter, end_idx);
f31e50a8 1990 tx_compl++;
6b7c5b94
SP
1991 }
1992
f31e50a8
SP
1993 mcc_compl = be_process_mcc(adapter, &status);
1994
1995 napi_complete(napi);
1996
1997 if (mcc_compl) {
1998 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1999 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2000 }
2001
2002 if (tx_compl) {
2003 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
5fb379ee 2004
4d586b82
PR
2005 atomic_sub(num_wrbs, &txq->used);
2006
5fb379ee
SP
2007 /* As Tx wrbs have been freed up, wake up netdev queue if
2008 * it was stopped due to lack of tx wrbs.
2009 */
2010 if (netif_queue_stopped(adapter->netdev) &&
6b7c5b94 2011 atomic_read(&txq->used) < txq->len / 2) {
5fb379ee
SP
2012 netif_wake_queue(adapter->netdev);
2013 }
2014
3abcdeda
SP
2015 tx_stats(adapter)->be_tx_events++;
2016 tx_stats(adapter)->be_tx_compl += tx_compl;
6b7c5b94 2017 }
6b7c5b94
SP
2018
2019 return 1;
2020}
2021
d053de91 2022void be_detect_dump_ue(struct be_adapter *adapter)
7c185276
AK
2023{
2024 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2025 u32 i;
2026
2027 pci_read_config_dword(adapter->pdev,
2028 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2029 pci_read_config_dword(adapter->pdev,
2030 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2031 pci_read_config_dword(adapter->pdev,
2032 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2033 pci_read_config_dword(adapter->pdev,
2034 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2035
2036 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2037 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2038
d053de91
AK
2039 if (ue_status_lo || ue_status_hi) {
2040 adapter->ue_detected = true;
7acc2087 2041 adapter->eeh_err = true;
d053de91
AK
2042 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2043 }
2044
7c185276
AK
2045 if (ue_status_lo) {
2046 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2047 if (ue_status_lo & 1)
2048 dev_err(&adapter->pdev->dev,
2049 "UE: %s bit set\n", ue_status_low_desc[i]);
2050 }
2051 }
2052 if (ue_status_hi) {
2053 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2054 if (ue_status_hi & 1)
2055 dev_err(&adapter->pdev->dev,
2056 "UE: %s bit set\n", ue_status_hi_desc[i]);
2057 }
2058 }
2059
2060}
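
The decode above masks off lanes the firmware flags as ignorable, then shifts through the remaining word, printing the name of every set bit. A standalone sketch with a truncated descriptor table and made-up register values:

        /* UE decode sketch; the table excerpt and values are made up. */
        #include <stdio.h>

        static const char *ue_desc[] = { "CEV", "CTX", "DBUF", "ERX" };

        int main(void)
        {
                unsigned int status = 0x5, mask = 0x4;
                unsigned int ue = status & ~mask; /* only unmasked bits count */
                unsigned int i;

                for (i = 0; ue; ue >>= 1, i++)
                        if (ue & 1)
                                printf("UE: %s bit set\n", ue_desc[i]);
                return 0;
        }
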
2061
ea1dae11
SP
2062static void be_worker(struct work_struct *work)
2063{
2064 struct be_adapter *adapter =
2065 container_of(work, struct be_adapter, work.work);
3abcdeda
SP
2066 struct be_rx_obj *rxo;
2067 int i;
ea1dae11 2068
16da8250
SP
2069 if (!adapter->ue_detected && !lancer_chip(adapter))
2070 be_detect_dump_ue(adapter);
2071
f203af70
SK
2072 /* when interrupts are not yet enabled, just reap any pending
2073 * mcc completions */
2074 if (!netif_running(adapter->netdev)) {
2075 int mcc_compl, status = 0;
2076
2077 mcc_compl = be_process_mcc(adapter, &status);
2078
2079 if (mcc_compl) {
2080 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2081 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2082 }
9b037f38 2083
f203af70
SK
2084 goto reschedule;
2085 }
2086
005d5696
SX
2087 if (!adapter->stats_cmd_sent) {
2088 if (lancer_chip(adapter))
2089 lancer_cmd_get_pport_stats(adapter,
2090 &adapter->stats_cmd);
2091 else
2092 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2093 }
4097f663 2094 be_tx_rate_update(adapter);
4097f663 2095
3abcdeda
SP
2096 for_all_rx_queues(adapter, rxo, i) {
2097 be_rx_rate_update(rxo);
2098 be_rx_eqd_update(adapter, rxo);
2099
2100 if (rxo->rx_post_starved) {
2101 rxo->rx_post_starved = false;
1829b086 2102 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda 2103 }
ea1dae11
SP
2104 }
2105
f203af70 2106reschedule:
e74fbd03 2107 adapter->work_counter++;
ea1dae11
SP
2108 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2109}
2110
8d56ff11
SP
2111static void be_msix_disable(struct be_adapter *adapter)
2112{
ac6a0c4a 2113 if (msix_enabled(adapter)) {
8d56ff11 2114 pci_disable_msix(adapter->pdev);
ac6a0c4a 2115 adapter->num_msix_vec = 0;
3abcdeda
SP
2116 }
2117}
2118
6b7c5b94
SP
2119static void be_msix_enable(struct be_adapter *adapter)
2120{
3abcdeda 2121#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
ac6a0c4a 2122 int i, status, num_vec;
6b7c5b94 2123
ac6a0c4a 2124 num_vec = be_num_rxqs_want(adapter) + 1;
3abcdeda 2125
ac6a0c4a 2126 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2127 adapter->msix_entries[i].entry = i;
2128
ac6a0c4a 2129 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2130 if (status == 0) {
2131 goto done;
2132 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2133 num_vec = status;
3abcdeda 2134 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2135 num_vec) == 0)
3abcdeda 2136 goto done;
3abcdeda
SP
2137 }
2138 return;
2139done:
ac6a0c4a
SP
2140 adapter->num_msix_vec = num_vec;
2141 return;
6b7c5b94
SP
2142}
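
In this kernel generation, pci_enable_msix() returns 0 on success or, on failure, a positive count of vectors the platform could actually allocate; the driver retries once with that smaller count before abandoning MSI-X. A sketch of the negotiation against a fake platform that caps at three vectors:

        /* MSI-X negotiation sketch; the fake platform caps at 3 vectors. */
        #include <stdio.h>

        static int fake_pci_enable_msix(int nvec)
        {
                return nvec <= 3 ? 0 : 3; /* >0 means "only this many fit" */
        }

        static int msix_negotiate(int wanted, int min_ok)
        {
                int status = fake_pci_enable_msix(wanted);

                if (status == 0)
                        return wanted;
                if (status >= min_ok && fake_pci_enable_msix(status) == 0)
                        return status;  /* retry with the offered count */
                return 0;               /* give up; fall back to INTx */
        }

        int main(void)
        {
                printf("got %d vectors\n", msix_negotiate(5, 2));
                return 0;
        }
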
2143
ba343c77
SB
2144static void be_sriov_enable(struct be_adapter *adapter)
2145{
344dbf10 2146 be_check_sriov_fn_type(adapter);
6dedec81 2147#ifdef CONFIG_PCI_IOV
ba343c77 2148 if (be_physfn(adapter) && num_vfs) {
81be8f0a
AK
2149 int status, pos;
2150 u16 nvfs;
2151
2152 pos = pci_find_ext_capability(adapter->pdev,
2153 PCI_EXT_CAP_ID_SRIOV);
2154 pci_read_config_word(adapter->pdev,
2155 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2156
2157 if (num_vfs > nvfs) {
2158 dev_info(&adapter->pdev->dev,
2159 "Device supports %d VFs and not %d\n",
2160 nvfs, num_vfs);
2161 num_vfs = nvfs;
2162 }
6dedec81 2163
ba343c77
SB
2164 status = pci_enable_sriov(adapter->pdev, num_vfs);
2165 adapter->sriov_enabled = status ? false : true;
2166 }
2167#endif
ba343c77
SB
2168}
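
The clamp above reads TotalVFs from the SR-IOV extended capability and silently lowers the module parameter when the user asks for more VFs than the device supports. The check reduces to a few lines (the values here are invented):

        /* TotalVFs clamp sketch; the values are invented. */
        #include <stdio.h>

        int main(void)
        {
                unsigned int num_vfs = 16;      /* module parameter */
                unsigned short total_vfs = 8;   /* from PCI_SRIOV_TOTAL_VF */

                if (num_vfs > total_vfs) {
                        printf("Device supports %u VFs and not %u\n",
                               total_vfs, num_vfs);
                        num_vfs = total_vfs;
                }
                printf("enabling %u VFs\n", num_vfs);
                return 0;
        }
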
2169
2170static void be_sriov_disable(struct be_adapter *adapter)
2171{
2172#ifdef CONFIG_PCI_IOV
2173 if (adapter->sriov_enabled) {
2174 pci_disable_sriov(adapter->pdev);
2175 adapter->sriov_enabled = false;
2176 }
2177#endif
2178}
2179
fe6d2a38
SP
2180static inline int be_msix_vec_get(struct be_adapter *adapter,
2181 struct be_eq_obj *eq_obj)
6b7c5b94 2182{
ecd62107 2183 return adapter->msix_entries[eq_obj->eq_idx].vector;
6b7c5b94
SP
2184}
2185
b628bde2
SP
2186static int be_request_irq(struct be_adapter *adapter,
2187 struct be_eq_obj *eq_obj,
3abcdeda 2188 void *handler, char *desc, void *context)
6b7c5b94
SP
2189{
2190 struct net_device *netdev = adapter->netdev;
b628bde2
SP
2191 int vec;
2192
2193 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 2194 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2195 return request_irq(vec, handler, 0, eq_obj->desc, context);
b628bde2
SP
2196}
2197
3abcdeda
SP
2198static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2199 void *context)
b628bde2 2200{
fe6d2a38 2201 int vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2202 free_irq(vec, context);
b628bde2 2203}
6b7c5b94 2204
b628bde2
SP
2205static int be_msix_register(struct be_adapter *adapter)
2206{
3abcdeda
SP
2207 struct be_rx_obj *rxo;
2208 int status, i;
2209 char qname[10];
b628bde2 2210
3abcdeda
SP
2211 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2212 adapter);
6b7c5b94
SP
2213 if (status)
2214 goto err;
2215
3abcdeda
SP
2216 for_all_rx_queues(adapter, rxo, i) {
2217 sprintf(qname, "rxq%d", i);
2218 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2219 qname, rxo);
2220 if (status)
2221 goto err_msix;
2222 }
b628bde2 2223
6b7c5b94 2224 return 0;
b628bde2 2225
3abcdeda
SP
2226err_msix:
2227 be_free_irq(adapter, &adapter->tx_eq, adapter);
2228
2229 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2230 be_free_irq(adapter, &rxo->rx_eq, rxo);
2231
6b7c5b94
SP
2232err:
2233 dev_warn(&adapter->pdev->dev,
2234 "MSIX Request IRQ failed - err %d\n", status);
ac6a0c4a 2235 be_msix_disable(adapter);
6b7c5b94
SP
2236 return status;
2237}
2238
2239static int be_irq_register(struct be_adapter *adapter)
2240{
2241 struct net_device *netdev = adapter->netdev;
2242 int status;
2243
ac6a0c4a 2244 if (msix_enabled(adapter)) {
6b7c5b94
SP
2245 status = be_msix_register(adapter);
2246 if (status == 0)
2247 goto done;
ba343c77
SB
2248 /* INTx is not supported for VF */
2249 if (!be_physfn(adapter))
2250 return status;
6b7c5b94
SP
2251 }
2252
2253 /* INTx */
2254 netdev->irq = adapter->pdev->irq;
2255 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2256 adapter);
2257 if (status) {
2258 dev_err(&adapter->pdev->dev,
2259 "INTx request IRQ failed - err %d\n", status);
2260 return status;
2261 }
2262done:
2263 adapter->isr_registered = true;
2264 return 0;
2265}
2266
2267static void be_irq_unregister(struct be_adapter *adapter)
2268{
2269 struct net_device *netdev = adapter->netdev;
3abcdeda
SP
2270 struct be_rx_obj *rxo;
2271 int i;
6b7c5b94
SP
2272
2273 if (!adapter->isr_registered)
2274 return;
2275
2276 /* INTx */
ac6a0c4a 2277 if (!msix_enabled(adapter)) {
6b7c5b94
SP
2278 free_irq(netdev->irq, adapter);
2279 goto done;
2280 }
2281
2282 /* MSIx */
3abcdeda
SP
2283 be_free_irq(adapter, &adapter->tx_eq, adapter);
2284
2285 for_all_rx_queues(adapter, rxo, i)
2286 be_free_irq(adapter, &rxo->rx_eq, rxo);
2287
6b7c5b94
SP
2288done:
2289 adapter->isr_registered = false;
6b7c5b94
SP
2290}
2291
889cd4b2
SP
2292static int be_close(struct net_device *netdev)
2293{
2294 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda 2295 struct be_rx_obj *rxo;
889cd4b2 2296 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2297 int vec, i;
889cd4b2 2298
889cd4b2
SP
2299 be_async_mcc_disable(adapter);
2300
889cd4b2
SP
2301 netif_carrier_off(netdev);
2302 adapter->link_up = false;
2303
fe6d2a38
SP
2304 if (!lancer_chip(adapter))
2305 be_intr_set(adapter, false);
889cd4b2 2306
63fcb27f
PR
2307 for_all_rx_queues(adapter, rxo, i)
2308 napi_disable(&rxo->rx_eq.napi);
2309
2310 napi_disable(&tx_eq->napi);
2311
2312 if (lancer_chip(adapter)) {
2313 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2314 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2315 for_all_rx_queues(adapter, rxo, i)
2316 be_cq_notify(adapter, rxo->cq.id, false, 0);
2317 }
2318
ac6a0c4a 2319 if (msix_enabled(adapter)) {
fe6d2a38 2320 vec = be_msix_vec_get(adapter, tx_eq);
889cd4b2 2321 synchronize_irq(vec);
3abcdeda
SP
2322
2323 for_all_rx_queues(adapter, rxo, i) {
fe6d2a38 2324 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
3abcdeda
SP
2325 synchronize_irq(vec);
2326 }
889cd4b2
SP
2327 } else {
2328 synchronize_irq(netdev->irq);
2329 }
2330 be_irq_unregister(adapter);
2331
889cd4b2
SP
2332 /* Wait for all pending tx completions to arrive so that
2333 * all tx skbs are freed.
2334 */
2335 be_tx_compl_clean(adapter);
2336
2337 return 0;
2338}
2339
6b7c5b94
SP
2340static int be_open(struct net_device *netdev)
2341{
2342 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 2343 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2344 struct be_rx_obj *rxo;
a8f447bd 2345 bool link_up;
3abcdeda 2346 int status, i;
0388f251
SB
2347 u8 mac_speed;
2348 u16 link_speed;
5fb379ee 2349
3abcdeda 2350 for_all_rx_queues(adapter, rxo, i) {
1829b086 2351 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda
SP
2352 napi_enable(&rxo->rx_eq.napi);
2353 }
5fb379ee
SP
2354 napi_enable(&tx_eq->napi);
2355
2356 be_irq_register(adapter);
2357
fe6d2a38
SP
2358 if (!lancer_chip(adapter))
2359 be_intr_set(adapter, true);
5fb379ee
SP
2360
2361 /* The evt queues are created in unarmed state; arm them */
3abcdeda
SP
2362 for_all_rx_queues(adapter, rxo, i) {
2363 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2364 be_cq_notify(adapter, rxo->cq.id, true, 0);
2365 }
8788fdc2 2366 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
5fb379ee 2367
7a1e9b20
SP
2368 /* Now that interrupts are on we can process async mcc */
2369 be_async_mcc_enable(adapter);
2370
0388f251 2371 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
187e8756 2372 &link_speed, 0);
a8f447bd 2373 if (status)
889cd4b2 2374 goto err;
a8f447bd 2375 be_link_status_update(adapter, link_up);
5fb379ee 2376
889cd4b2 2377 if (be_physfn(adapter)) {
1da87b7f 2378 status = be_vid_config(adapter, false, 0);
889cd4b2
SP
2379 if (status)
2380 goto err;
4f2aa89c 2381
ba343c77
SB
2382 status = be_cmd_set_flow_control(adapter,
2383 adapter->tx_fc, adapter->rx_fc);
2384 if (status)
889cd4b2 2385 goto err;
ba343c77 2386 }
4f2aa89c 2387
889cd4b2
SP
2388 return 0;
2389err:
2390 be_close(adapter->netdev);
2391 return -EIO;
5fb379ee
SP
2392}
2393
71d8d1b5
AK
2394static int be_setup_wol(struct be_adapter *adapter, bool enable)
2395{
2396 struct be_dma_mem cmd;
2397 int status = 0;
2398 u8 mac[ETH_ALEN];
2399
2400 memset(mac, 0, ETH_ALEN);
2401
2402 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2403 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2404 GFP_KERNEL);
71d8d1b5
AK
2405 if (cmd.va == NULL)
2406 return -1;
2407 memset(cmd.va, 0, cmd.size);
2408
2409 if (enable) {
2410 status = pci_write_config_dword(adapter->pdev,
2411 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2412 if (status) {
2413 dev_err(&adapter->pdev->dev,
2381a55c 2414				"Could not enable Wake-on-LAN\n");
2b7bcebf
IV
2415 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2416 cmd.dma);
71d8d1b5
AK
2417 return status;
2418 }
2419 status = be_cmd_enable_magic_wol(adapter,
2420 adapter->netdev->dev_addr, &cmd);
2421 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2422 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2423 } else {
2424 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2425 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2426 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2427 }
2428
2b7bcebf 2429 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2430 return status;
2431}
2432
6d87f5c3
AK
2433/*
2434 * Generate a seed MAC address from the PF MAC Address using jhash.
2435 * MAC addresses for VFs are assigned incrementally starting from the seed.
2436 * These addresses are programmed in the ASIC by the PF and the VF driver
2437 * queries for the MAC address during its probe.
2438 */
2439static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2440{
2441 u32 vf = 0;
3abcdeda 2442 int status = 0;
6d87f5c3
AK
2443 u8 mac[ETH_ALEN];
2444
2445 be_vf_eth_addr_generate(adapter, mac);
2446
2447 for (vf = 0; vf < num_vfs; vf++) {
2448 status = be_cmd_pmac_add(adapter, mac,
2449 adapter->vf_cfg[vf].vf_if_handle,
f8617e08
AK
2450 &adapter->vf_cfg[vf].vf_pmac_id,
2451 vf + 1);
6d87f5c3
AK
2452 if (status)
2453 dev_err(&adapter->pdev->dev,
2454 "Mac address add failed for VF %d\n", vf);
2455 else
2456 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2457
2458 mac[5] += 1;
2459 }
2460 return status;
2461}
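
Note that the loop advances MACs with mac[5] += 1, so only the last octet changes and a seed ending near 0xff would wrap without carrying into mac[4]. A sketch contrasting that with a carry-propagating increment (the carry variant is an assumption shown for illustration, not what the driver does):

        /* VF MAC increment sketch; mac_inc_carry() is a hypothetical
         * carry-propagating variant, unlike the driver's mac[5] += 1. */
        #include <stdio.h>

        static void mac_inc_carry(unsigned char mac[6])
        {
                int i;

                for (i = 5; i >= 0 && ++mac[i] == 0; i--)
                        ;       /* ripple the carry left on wrap */
        }

        int main(void)
        {
                unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0xfe };
                int vf;

                for (vf = 0; vf < 3; vf++) {
                        printf("VF%d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
                               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
                        mac_inc_carry(mac);
                }
                return 0;
        }
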
2462
2463static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2464{
2465 u32 vf;
2466
2467 for (vf = 0; vf < num_vfs; vf++) {
2468 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2469 be_cmd_pmac_del(adapter,
2470 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 2471 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
6d87f5c3
AK
2472 }
2473}
2474
5fb379ee
SP
2475static int be_setup(struct be_adapter *adapter)
2476{
5fb379ee 2477 struct net_device *netdev = adapter->netdev;
ba343c77 2478 u32 cap_flags, en_flags, vf = 0;
6b7c5b94 2479 int status;
ba343c77
SB
2480 u8 mac[ETH_ALEN];
2481
f21b538c
PR
2482 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2483 BE_IF_FLAGS_BROADCAST |
2484 BE_IF_FLAGS_MULTICAST;
6b7c5b94 2485
ba343c77
SB
2486 if (be_physfn(adapter)) {
2487 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2488 BE_IF_FLAGS_PROMISCUOUS |
2489 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2490 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
3abcdeda 2491
ac6a0c4a 2492 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
3abcdeda
SP
2493 cap_flags |= BE_IF_FLAGS_RSS;
2494 en_flags |= BE_IF_FLAGS_RSS;
2495 }
ba343c77 2496 }
73d540f2
SP
2497
2498 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2499 netdev->dev_addr, false/* pmac_invalid */,
ba343c77 2500 &adapter->if_handle, &adapter->pmac_id, 0);
6b7c5b94
SP
2501 if (status != 0)
2502 goto do_none;
2503
ba343c77 2504 if (be_physfn(adapter)) {
c99ac3e7
AK
2505 if (adapter->sriov_enabled) {
2506 while (vf < num_vfs) {
2507 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2508 BE_IF_FLAGS_BROADCAST;
2509 status = be_cmd_if_create(adapter, cap_flags,
2510 en_flags, mac, true,
64600ea5 2511 &adapter->vf_cfg[vf].vf_if_handle,
ba343c77 2512 NULL, vf+1);
c99ac3e7
AK
2513 if (status) {
2514 dev_err(&adapter->pdev->dev,
2515 "Interface Create failed for VF %d\n",
2516 vf);
2517 goto if_destroy;
2518 }
2519 adapter->vf_cfg[vf].vf_pmac_id =
2520 BE_INVALID_PMAC_ID;
2521 vf++;
ba343c77 2522 }
84e5b9f7 2523 }
c99ac3e7 2524 } else {
ba343c77
SB
2525 status = be_cmd_mac_addr_query(adapter, mac,
2526 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2527 if (!status) {
2528 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2529 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2530 }
2531 }
2532
6b7c5b94
SP
2533 status = be_tx_queues_create(adapter);
2534 if (status != 0)
2535 goto if_destroy;
2536
2537 status = be_rx_queues_create(adapter);
2538 if (status != 0)
2539 goto tx_qs_destroy;
2540
5fb379ee
SP
2541 status = be_mcc_queues_create(adapter);
2542 if (status != 0)
2543 goto rx_qs_destroy;
6b7c5b94 2544
0dffc83e
AK
2545 adapter->link_speed = -1;
2546
6b7c5b94
SP
2547 return 0;
2548
5fb379ee
SP
2549rx_qs_destroy:
2550 be_rx_queues_destroy(adapter);
6b7c5b94
SP
2551tx_qs_destroy:
2552 be_tx_queues_destroy(adapter);
2553if_destroy:
c99ac3e7
AK
2554 if (be_physfn(adapter) && adapter->sriov_enabled)
2555 for (vf = 0; vf < num_vfs; vf++)
2556 if (adapter->vf_cfg[vf].vf_if_handle)
2557 be_cmd_if_destroy(adapter,
658681f7
AK
2558 adapter->vf_cfg[vf].vf_if_handle,
2559 vf + 1);
2560 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
6b7c5b94
SP
2561do_none:
2562 return status;
2563}
2564
5fb379ee
SP
2565static int be_clear(struct be_adapter *adapter)
2566{
7ab8b0b4
AK
2567 int vf;
2568
c99ac3e7 2569 if (be_physfn(adapter) && adapter->sriov_enabled)
6d87f5c3
AK
2570 be_vf_eth_addr_rem(adapter);
2571
1a8887d8 2572 be_mcc_queues_destroy(adapter);
5fb379ee
SP
2573 be_rx_queues_destroy(adapter);
2574 be_tx_queues_destroy(adapter);
1f5db833 2575 adapter->eq_next_idx = 0;
5fb379ee 2576
7ab8b0b4
AK
2577 if (be_physfn(adapter) && adapter->sriov_enabled)
2578 for (vf = 0; vf < num_vfs; vf++)
2579 if (adapter->vf_cfg[vf].vf_if_handle)
2580 be_cmd_if_destroy(adapter,
2581 adapter->vf_cfg[vf].vf_if_handle,
2582 vf + 1);
2583
658681f7 2584 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
5fb379ee 2585
2243e2e9
SP
2586 /* tell fw we're done with firing cmds */
2587 be_cmd_fw_clean(adapter);
5fb379ee
SP
2588 return 0;
2589}
2590
6b7c5b94 2591
84517482 2592#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2593static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2594 const u8 *p, u32 img_start, int image_size,
2595 int hdr_size)
fa9a6fed
SB
2596{
2597 u32 crc_offset;
2598 u8 flashed_crc[4];
2599 int status;
3f0d4560
AK
2600
2601 crc_offset = hdr_size + img_start + image_size - 4;
2602
fa9a6fed 2603 p += crc_offset;
3f0d4560
AK
2604
2605 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2606 (image_size - 4));
fa9a6fed
SB
2607 if (status) {
2608 dev_err(&adapter->pdev->dev,
2609 "could not get crc from flash, not flashing redboot\n");
2610 return false;
2611 }
2612
2613	/* Update redboot only if the CRC does not match */
2614 if (!memcmp(flashed_crc, p, 4))
2615 return false;
2616 else
2617 return true;
fa9a6fed
SB
2618}
2619
3f0d4560 2620static int be_flash_data(struct be_adapter *adapter,
84517482 2621 const struct firmware *fw,
3f0d4560
AK
2622 struct be_dma_mem *flash_cmd, int num_of_images)
2623
84517482 2624{
3f0d4560
AK
2625 int status = 0, i, filehdr_size = 0;
2626 u32 total_bytes = 0, flash_op;
84517482
AK
2627 int num_bytes;
2628 const u8 *p = fw->data;
2629 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2630 const struct flash_comp *pflashcomp;
9fe96934 2631 int num_comp;
3f0d4560 2632
215faf9c 2633 static const struct flash_comp gen3_flash_types[9] = {
3f0d4560
AK
2634 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2635 FLASH_IMAGE_MAX_SIZE_g3},
2636 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2637 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2638 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2639 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2640 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2641 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2642 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2643 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2644 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2645 FLASH_IMAGE_MAX_SIZE_g3},
2646 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2647 FLASH_IMAGE_MAX_SIZE_g3},
2648 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2649 FLASH_IMAGE_MAX_SIZE_g3},
2650 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2651 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
3f0d4560 2652 };
215faf9c 2653 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2654 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2655 FLASH_IMAGE_MAX_SIZE_g2},
2656 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2657 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2658 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2659 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2660 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2661 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2662 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2663 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2664 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2665 FLASH_IMAGE_MAX_SIZE_g2},
2666 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2667 FLASH_IMAGE_MAX_SIZE_g2},
2668 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2669 FLASH_IMAGE_MAX_SIZE_g2}
2670 };
2671
2672 if (adapter->generation == BE_GEN3) {
2673 pflashcomp = gen3_flash_types;
2674 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2675 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2676 } else {
2677 pflashcomp = gen2_flash_types;
2678 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2679 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2680 }
9fe96934
SB
2681 for (i = 0; i < num_comp; i++) {
2682 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2683 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2684 continue;
3f0d4560
AK
2685 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2686 (!be_flash_redboot(adapter, fw->data,
fae21a4d
AK
2687 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2688 (num_of_images * sizeof(struct image_hdr)))))
3f0d4560
AK
2689 continue;
2690 p = fw->data;
2691 p += filehdr_size + pflashcomp[i].offset
2692 + (num_of_images * sizeof(struct image_hdr));
2693 if (p + pflashcomp[i].size > fw->data + fw->size)
84517482 2694 return -1;
3f0d4560
AK
2695 total_bytes = pflashcomp[i].size;
2696 while (total_bytes) {
2697 if (total_bytes > 32*1024)
2698 num_bytes = 32*1024;
2699 else
2700 num_bytes = total_bytes;
2701 total_bytes -= num_bytes;
2702
2703 if (!total_bytes)
2704 flash_op = FLASHROM_OPER_FLASH;
2705 else
2706 flash_op = FLASHROM_OPER_SAVE;
2707 memcpy(req->params.data_buf, p, num_bytes);
2708 p += num_bytes;
2709 status = be_cmd_write_flashrom(adapter, flash_cmd,
2710 pflashcomp[i].optype, flash_op, num_bytes);
2711 if (status) {
2712 dev_err(&adapter->pdev->dev,
2713 "cmd to write to flash rom failed.\n");
2714 return -1;
2715 }
84517482 2716 }
84517482 2717 }
84517482
AK
2718 return 0;
2719}
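
The inner loop above streams each image in 32KB pieces: every intermediate chunk goes down with FLASHROM_OPER_SAVE, and only the final chunk uses FLASHROM_OPER_FLASH to commit. The chunking reduces to (the image size is invented):

        /* Chunked-flash sketch; the image size is made up. */
        #include <stdio.h>

        #define CHUNK (32 * 1024)

        int main(void)
        {
                unsigned int total = 100 * 1024;

                while (total) {
                        unsigned int n = total > CHUNK ? CHUNK : total;

                        total -= n;
                        /* last chunk commits; earlier chunks only stage */
                        printf("%u bytes, op=%s\n", n,
                               total ? "SAVE" : "FLASH");
                }
                return 0;
        }
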
2720
3f0d4560
AK
2721static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2722{
2723 if (fhdr == NULL)
2724 return 0;
2725 if (fhdr->build[0] == '3')
2726 return BE_GEN3;
2727 else if (fhdr->build[0] == '2')
2728 return BE_GEN2;
2729 else
2730 return 0;
2731}
2732
485bf569
SN
2733static int lancer_fw_download(struct be_adapter *adapter,
2734 const struct firmware *fw)
84517482 2735{
485bf569
SN
2736#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2737#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 2738 struct be_dma_mem flash_cmd;
485bf569
SN
2739 struct lancer_cmd_req_write_object *req;
2740 const u8 *data_ptr = NULL;
2741 u8 *dest_image_ptr = NULL;
2742 size_t image_size = 0;
2743 u32 chunk_size = 0;
2744 u32 data_written = 0;
2745 u32 offset = 0;
2746 int status = 0;
2747 u8 add_status = 0;
84517482 2748
485bf569 2749 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 2750 dev_err(&adapter->pdev->dev,
485bf569
SN
2751 "FW Image not properly aligned. "
2752 "Length must be 4 byte aligned.\n");
2753 status = -EINVAL;
2754 goto lancer_fw_exit;
d9efd2af
SB
2755 }
2756
485bf569
SN
2757 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2758 + LANCER_FW_DOWNLOAD_CHUNK;
2759 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2760 &flash_cmd.dma, GFP_KERNEL);
2761 if (!flash_cmd.va) {
2762 status = -ENOMEM;
2763 dev_err(&adapter->pdev->dev,
2764 "Memory allocation failure while flashing\n");
2765 goto lancer_fw_exit;
2766 }
84517482 2767
485bf569
SN
2768 req = flash_cmd.va;
2769 dest_image_ptr = flash_cmd.va +
2770 sizeof(struct lancer_cmd_req_write_object);
2771 image_size = fw->size;
2772 data_ptr = fw->data;
2773
2774 while (image_size) {
2775 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2776
2777 /* Copy the image chunk content. */
2778 memcpy(dest_image_ptr, data_ptr, chunk_size);
2779
2780 status = lancer_cmd_write_object(adapter, &flash_cmd,
2781 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2782 &data_written, &add_status);
2783
2784 if (status)
2785 break;
2786
2787 offset += data_written;
2788 data_ptr += data_written;
2789 image_size -= data_written;
2790 }
2791
2792 if (!status) {
2793 /* Commit the FW written */
2794 status = lancer_cmd_write_object(adapter, &flash_cmd,
2795 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2796 &data_written, &add_status);
2797 }
2798
2799 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2800 flash_cmd.dma);
2801 if (status) {
2802 dev_err(&adapter->pdev->dev,
2803 "Firmware load error. "
2804 "Status code: 0x%x Additional Status: 0x%x\n",
2805 status, add_status);
2806 goto lancer_fw_exit;
2807 }
2808
2809 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2810lancer_fw_exit:
2811 return status;
2812}
2813
2814static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2815{
2816 struct flash_file_hdr_g2 *fhdr;
2817 struct flash_file_hdr_g3 *fhdr3;
2818 struct image_hdr *img_hdr_ptr = NULL;
2819 struct be_dma_mem flash_cmd;
2820 const u8 *p;
2821 int status = 0, i = 0, num_imgs = 0;
84517482
AK
2822
2823 p = fw->data;
3f0d4560 2824 fhdr = (struct flash_file_hdr_g2 *) p;
84517482 2825
84517482 2826 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2b7bcebf
IV
2827 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2828 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
2829 if (!flash_cmd.va) {
2830 status = -ENOMEM;
2831 dev_err(&adapter->pdev->dev,
2832 "Memory allocation failure while flashing\n");
485bf569 2833 goto be_fw_exit;
84517482
AK
2834 }
2835
3f0d4560
AK
2836 if ((adapter->generation == BE_GEN3) &&
2837 (get_ufigen_type(fhdr) == BE_GEN3)) {
2838 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
8b93b710
AK
2839 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2840 for (i = 0; i < num_imgs; i++) {
3f0d4560
AK
2841 img_hdr_ptr = (struct image_hdr *) (fw->data +
2842 (sizeof(struct flash_file_hdr_g3) +
8b93b710
AK
2843 i * sizeof(struct image_hdr)));
2844 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2845 status = be_flash_data(adapter, fw, &flash_cmd,
2846 num_imgs);
3f0d4560
AK
2847 }
2848 } else if ((adapter->generation == BE_GEN2) &&
2849 (get_ufigen_type(fhdr) == BE_GEN2)) {
2850 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2851 } else {
2852 dev_err(&adapter->pdev->dev,
2853 "UFI and Interface are not compatible for flashing\n");
2854 status = -1;
84517482
AK
2855 }
2856
2b7bcebf
IV
2857 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2858 flash_cmd.dma);
84517482
AK
2859 if (status) {
2860 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 2861 goto be_fw_exit;
84517482
AK
2862 }
2863
af901ca1 2864 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 2865
485bf569
SN
2866be_fw_exit:
2867 return status;
2868}
2869
2870int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2871{
2872 const struct firmware *fw;
2873 int status;
2874
2875 if (!netif_running(adapter->netdev)) {
2876 dev_err(&adapter->pdev->dev,
2877 "Firmware load not allowed (interface is down)\n");
2878 return -1;
2879 }
2880
2881 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2882 if (status)
2883 goto fw_exit;
2884
2885 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2886
2887 if (lancer_chip(adapter))
2888 status = lancer_fw_download(adapter, fw);
2889 else
2890 status = be_fw_download(adapter, fw);
2891
84517482
AK
2892fw_exit:
2893 release_firmware(fw);
2894 return status;
2895}
2896
6b7c5b94
SP
2897static struct net_device_ops be_netdev_ops = {
2898 .ndo_open = be_open,
2899 .ndo_stop = be_close,
2900 .ndo_start_xmit = be_xmit,
6b7c5b94
SP
2901 .ndo_set_rx_mode = be_set_multicast_list,
2902 .ndo_set_mac_address = be_mac_addr_set,
2903 .ndo_change_mtu = be_change_mtu,
2904 .ndo_validate_addr = eth_validate_addr,
2905 .ndo_vlan_rx_register = be_vlan_register,
2906 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2907 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 2908 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 2909 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 2910 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
64600ea5 2911 .ndo_get_vf_config = be_get_vf_config
6b7c5b94
SP
2912};
2913
2914static void be_netdev_init(struct net_device *netdev)
2915{
2916 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda
SP
2917 struct be_rx_obj *rxo;
2918 int i;
6b7c5b94 2919
6332c8d3 2920 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68
MM
2921 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2922 NETIF_F_HW_VLAN_TX;
2923 if (be_multi_rxq(adapter))
2924 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
2925
2926 netdev->features |= netdev->hw_features |
8b8ddc68 2927 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 2928
79032644
MM
2929 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2930 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 2931
fe6d2a38
SP
2932 if (lancer_chip(adapter))
2933 netdev->vlan_features |= NETIF_F_TSO6;
2934
6b7c5b94
SP
2935 netdev->flags |= IFF_MULTICAST;
2936
9e90c961
AK
2937 /* Default settings for Rx and Tx flow control */
2938 adapter->rx_fc = true;
2939 adapter->tx_fc = true;
2940
c190e3c8
AK
2941 netif_set_gso_max_size(netdev, 65535);
2942
6b7c5b94
SP
2943 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2944
2945 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2946
3abcdeda
SP
2947 for_all_rx_queues(adapter, rxo, i)
2948 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2949 BE_NAPI_WEIGHT);
2950
5fb379ee 2951 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
6b7c5b94 2952 BE_NAPI_WEIGHT);
6b7c5b94
SP
2953}
2954
2955static void be_unmap_pci_bars(struct be_adapter *adapter)
2956{
8788fdc2
SP
2957 if (adapter->csr)
2958 iounmap(adapter->csr);
2959 if (adapter->db)
2960 iounmap(adapter->db);
ba343c77 2961 if (adapter->pcicfg && be_physfn(adapter))
8788fdc2 2962 iounmap(adapter->pcicfg);
6b7c5b94
SP
2963}
2964
2965static int be_map_pci_bars(struct be_adapter *adapter)
2966{
2967 u8 __iomem *addr;
ba343c77 2968 int pcicfg_reg, db_reg;
6b7c5b94 2969
fe6d2a38
SP
2970 if (lancer_chip(adapter)) {
2971 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2972 pci_resource_len(adapter->pdev, 0));
2973 if (addr == NULL)
2974 return -ENOMEM;
2975 adapter->db = addr;
2976 return 0;
2977 }
2978
ba343c77
SB
2979 if (be_physfn(adapter)) {
2980 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2981 pci_resource_len(adapter->pdev, 2));
2982 if (addr == NULL)
2983 return -ENOMEM;
2984 adapter->csr = addr;
2985 }
6b7c5b94 2986
ba343c77 2987 if (adapter->generation == BE_GEN2) {
7b139c83 2988 pcicfg_reg = 1;
ba343c77
SB
2989 db_reg = 4;
2990 } else {
7b139c83 2991 pcicfg_reg = 0;
ba343c77
SB
2992 if (be_physfn(adapter))
2993 db_reg = 4;
2994 else
2995 db_reg = 0;
2996 }
2997 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2998 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
2999 if (addr == NULL)
3000 goto pci_map_err;
ba343c77
SB
3001 adapter->db = addr;
3002
3003 if (be_physfn(adapter)) {
3004 addr = ioremap_nocache(
3005 pci_resource_start(adapter->pdev, pcicfg_reg),
3006 pci_resource_len(adapter->pdev, pcicfg_reg));
3007 if (addr == NULL)
3008 goto pci_map_err;
3009 adapter->pcicfg = addr;
3010 } else
3011 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
6b7c5b94
SP
3012
3013 return 0;
3014pci_map_err:
3015 be_unmap_pci_bars(adapter);
3016 return -ENOMEM;
3017}
3018
3019
3020static void be_ctrl_cleanup(struct be_adapter *adapter)
3021{
8788fdc2 3022 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3023
3024 be_unmap_pci_bars(adapter);
3025
3026 if (mem->va)
2b7bcebf
IV
3027 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3028 mem->dma);
e7b909a6
SP
3029
3030 mem = &adapter->mc_cmd_mem;
3031 if (mem->va)
2b7bcebf
IV
3032 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3033 mem->dma);
6b7c5b94
SP
3034}
3035
6b7c5b94
SP
3036static int be_ctrl_init(struct be_adapter *adapter)
3037{
8788fdc2
SP
3038 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3039 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
e7b909a6 3040 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
6b7c5b94 3041 int status;
6b7c5b94
SP
3042
3043 status = be_map_pci_bars(adapter);
3044 if (status)
e7b909a6 3045 goto done;
6b7c5b94
SP
3046
3047 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
3048 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3049 mbox_mem_alloc->size,
3050 &mbox_mem_alloc->dma,
3051 GFP_KERNEL);
6b7c5b94 3052 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3053 status = -ENOMEM;
3054 goto unmap_pci_bars;
6b7c5b94 3055 }
e7b909a6 3056
6b7c5b94
SP
3057 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3058 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3059 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3060 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6
SP
3061
3062 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2b7bcebf
IV
3063 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3064 mc_cmd_mem->size, &mc_cmd_mem->dma,
3065 GFP_KERNEL);
e7b909a6
SP
3066 if (mc_cmd_mem->va == NULL) {
3067 status = -ENOMEM;
3068 goto free_mbox;
3069 }
3070 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3071
2984961c 3072 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3073 spin_lock_init(&adapter->mcc_lock);
3074 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3075
dd131e76 3076 init_completion(&adapter->flash_compl);
cf588477 3077 pci_save_state(adapter->pdev);
6b7c5b94 3078 return 0;
e7b909a6
SP
3079
3080free_mbox:
2b7bcebf
IV
3081 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3082 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3083
3084unmap_pci_bars:
3085 be_unmap_pci_bars(adapter);
3086
3087done:
3088 return status;
6b7c5b94
SP
3089}
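
The mailbox setup above over-allocates by 16 bytes and then uses PTR_ALIGN to round both the virtual and DMA addresses up to a 16-byte boundary, so the mailbox is aligned no matter what the allocator returned. A userspace sketch of the same trick:

        /* 16-byte alignment sketch; ALIGN_UP mirrors PTR_ALIGN's rounding. */
        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>

        #define ALIGN_UP(p, a) \
                ((void *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

        int main(void)
        {
                size_t size = 64;       /* stands in for the mailbox size */
                void *raw = malloc(size + 16);  /* over-allocate */
                void *mbox = ALIGN_UP(raw, 16); /* round up to boundary */

                printf("raw=%p mbox=%p\n", raw, mbox);
                free(raw);      /* always free the original pointer */
                return 0;
        }
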
3090
3091static void be_stats_cleanup(struct be_adapter *adapter)
3092{
3abcdeda 3093 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3094
3095 if (cmd->va)
2b7bcebf
IV
3096 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3097 cmd->va, cmd->dma);
6b7c5b94
SP
3098}
3099
3100static int be_stats_init(struct be_adapter *adapter)
3101{
3abcdeda 3102 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3103
005d5696 3104 if (adapter->generation == BE_GEN2) {
89a88ab8 3105 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
005d5696
SX
3106 } else {
3107 if (lancer_chip(adapter))
3108 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3109 else
3110 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3111 }
2b7bcebf
IV
3112 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3113 GFP_KERNEL);
6b7c5b94
SP
3114 if (cmd->va == NULL)
3115 return -1;
d291b9af 3116 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3117 return 0;
3118}
3119
3120static void __devexit be_remove(struct pci_dev *pdev)
3121{
3122 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3123
6b7c5b94
SP
3124 if (!adapter)
3125 return;
3126
f203af70
SK
3127 cancel_delayed_work_sync(&adapter->work);
3128
6b7c5b94
SP
3129 unregister_netdev(adapter->netdev);
3130
5fb379ee
SP
3131 be_clear(adapter);
3132
6b7c5b94
SP
3133 be_stats_cleanup(adapter);
3134
3135 be_ctrl_cleanup(adapter);
3136
48f5a191 3137 kfree(adapter->vf_cfg);
ba343c77
SB
3138 be_sriov_disable(adapter);
3139
8d56ff11 3140 be_msix_disable(adapter);
6b7c5b94
SP
3141
3142 pci_set_drvdata(pdev, NULL);
3143 pci_release_regions(pdev);
3144 pci_disable_device(pdev);
3145
3146 free_netdev(adapter->netdev);
3147}
3148
2243e2e9 3149static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3150{
6b7c5b94 3151 int status;
2243e2e9 3152 u8 mac[ETH_ALEN];
6b7c5b94 3153
2243e2e9 3154 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
6b7c5b94
SP
3155 if (status)
3156 return status;
3157
3abcdeda
SP
3158 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3159 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3160 if (status)
3161 return status;
3162
2243e2e9 3163 memset(mac, 0, ETH_ALEN);
ba343c77 3164
12f4d0a8
ME
3165	/* A default permanent address is given to each VF for Lancer */
3166 if (be_physfn(adapter) || lancer_chip(adapter)) {
ba343c77 3167 status = be_cmd_mac_addr_query(adapter, mac,
2243e2e9 3168 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
ca9e4988 3169
ba343c77
SB
3170 if (status)
3171 return status;
ca9e4988 3172
ba343c77
SB
3173 if (!is_valid_ether_addr(mac))
3174 return -EADDRNOTAVAIL;
3175
3176 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3177 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3178 }
6b7c5b94 3179
3486be29 3180 if (adapter->function_mode & 0x400)
82903e4b
AK
3181 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3182 else
3183 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3184
9e1453c5
AK
3185 status = be_cmd_get_cntl_attributes(adapter);
3186 if (status)
3187 return status;
3188
2e588f84 3189 be_cmd_check_native_mode(adapter);
2243e2e9 3190 return 0;
6b7c5b94
SP
3191}
3192
fe6d2a38
SP
3193static int be_dev_family_check(struct be_adapter *adapter)
3194{
3195 struct pci_dev *pdev = adapter->pdev;
3196 u32 sli_intf = 0, if_type;
3197
3198 switch (pdev->device) {
3199 case BE_DEVICE_ID1:
3200 case OC_DEVICE_ID1:
3201 adapter->generation = BE_GEN2;
3202 break;
3203 case BE_DEVICE_ID2:
3204 case OC_DEVICE_ID2:
3205 adapter->generation = BE_GEN3;
3206 break;
3207 case OC_DEVICE_ID3:
12f4d0a8 3208 case OC_DEVICE_ID4:
fe6d2a38
SP
3209 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3210 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3211 SLI_INTF_IF_TYPE_SHIFT;
3212
3213 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3214 if_type != 0x02) {
3215 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3216 return -EINVAL;
3217 }
fe6d2a38
SP
3218 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3219 SLI_INTF_FAMILY_SHIFT);
3220 adapter->generation = BE_GEN3;
3221 break;
3222 default:
3223 adapter->generation = 0;
3224 }
3225 return 0;
3226}
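
For the SLI-based device IDs, the function reads the SLI_INTF register and extracts fields by mask-and-shift. A sketch of the extraction, with invented mask and shift values standing in for the SLI_INTF_* constants:

        /* Mask-and-shift sketch; masks, shifts and the value are invented. */
        #include <stdio.h>

        #define IF_TYPE_MASK    0x0000f000u
        #define IF_TYPE_SHIFT   12
        #define FAMILY_MASK     0x00000f00u
        #define FAMILY_SHIFT    8

        int main(void)
        {
                unsigned int sli_intf = 0x00002a00u;
                unsigned int if_type =
                        (sli_intf & IF_TYPE_MASK) >> IF_TYPE_SHIFT;
                unsigned int family =
                        (sli_intf & FAMILY_MASK) >> FAMILY_SHIFT;

                printf("if_type=%u family=%u\n", if_type, family);
                return 0;
        }
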
3227
37eed1cb
PR
3228static int lancer_wait_ready(struct be_adapter *adapter)
3229{
3230#define SLIPORT_READY_TIMEOUT 500
3231 u32 sliport_status;
3232 int status = 0, i;
3233
3234 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3235 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3236 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3237 break;
3238
3239 msleep(20);
3240 }
3241
3242 if (i == SLIPORT_READY_TIMEOUT)
3243 status = -1;
3244
3245 return status;
3246}
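
lancer_wait_ready is a classic bounded poll: re-read the status register up to SLIPORT_READY_TIMEOUT times with a 20ms sleep between reads, and fail if the ready bit never appears. A standalone sketch in which readiness arrives on the fourth poll (the stub and values are illustrative):

        /* Bounded-poll sketch; read_status() is a stub that turns ready
         * on the fourth read. */
        #include <stdio.h>

        #define READY_TIMEOUT   500
        #define RDY_MASK        0x1u

        static unsigned int read_status(int attempt)
        {
                return attempt >= 3 ? RDY_MASK : 0;
        }

        int main(void)
        {
                int i;

                for (i = 0; i < READY_TIMEOUT; i++) {
                        if (read_status(i) & RDY_MASK)
                                break;
                        /* the driver sleeps 20ms here (msleep) */
                }

                if (i == READY_TIMEOUT)
                        printf("timed out\n");
                else
                        printf("ready after %d polls\n", i + 1);
                return 0;
        }
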
3247
3248static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3249{
3250 int status;
3251 u32 sliport_status, err, reset_needed;
3252 status = lancer_wait_ready(adapter);
3253 if (!status) {
3254 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3255 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3256 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3257 if (err && reset_needed) {
3258 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3259 adapter->db + SLIPORT_CONTROL_OFFSET);
3260
3261			/* check whether the adapter has corrected the error */
3262 status = lancer_wait_ready(adapter);
3263 sliport_status = ioread32(adapter->db +
3264 SLIPORT_STATUS_OFFSET);
3265 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3266 SLIPORT_STATUS_RN_MASK);
3267 if (status || sliport_status)
3268 status = -1;
3269 } else if (err || reset_needed) {
3270 status = -1;
3271 }
3272 }
3273 return status;
3274}
3275
6b7c5b94
SP
3276static int __devinit be_probe(struct pci_dev *pdev,
3277 const struct pci_device_id *pdev_id)
3278{
3279 int status = 0;
3280 struct be_adapter *adapter;
3281 struct net_device *netdev;
6b7c5b94
SP
3282
3283 status = pci_enable_device(pdev);
3284 if (status)
3285 goto do_none;
3286
3287 status = pci_request_regions(pdev, DRV_NAME);
3288 if (status)
3289 goto disable_dev;
3290 pci_set_master(pdev);
3291
3292 netdev = alloc_etherdev(sizeof(struct be_adapter));
3293 if (netdev == NULL) {
3294 status = -ENOMEM;
3295 goto rel_reg;
3296 }
3297 adapter = netdev_priv(netdev);
3298 adapter->pdev = pdev;
3299 pci_set_drvdata(pdev, adapter);
fe6d2a38
SP
3300
3301 status = be_dev_family_check(adapter);
63657b9c 3302 if (status)
fe6d2a38
SP
3303 goto free_netdev;
3304
6b7c5b94 3305 adapter->netdev = netdev;
2243e2e9 3306 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3307
2b7bcebf 3308 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
3309 if (!status) {
3310 netdev->features |= NETIF_F_HIGHDMA;
3311 } else {
2b7bcebf 3312 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
3313 if (status) {
3314 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3315 goto free_netdev;
3316 }
3317 }
3318
ba343c77 3319 be_sriov_enable(adapter);
48f5a191
AK
3320 if (adapter->sriov_enabled) {
3321 adapter->vf_cfg = kcalloc(num_vfs,
3322 sizeof(struct be_vf_cfg), GFP_KERNEL);
3323
3324 if (!adapter->vf_cfg)
3325 goto free_netdev;
3326 }
ba343c77 3327
6b7c5b94
SP
3328 status = be_ctrl_init(adapter);
3329 if (status)
48f5a191 3330 goto free_vf_cfg;
6b7c5b94 3331
37eed1cb
PR
3332 if (lancer_chip(adapter)) {
3333 status = lancer_test_and_set_rdy_state(adapter);
3334 if (status) {
3335			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
48f5a191 3336 goto ctrl_clean;
37eed1cb
PR
3337 }
3338 }
3339
2243e2e9 3340 /* sync up with fw's ready state */
ba343c77
SB
3341 if (be_physfn(adapter)) {
3342 status = be_cmd_POST(adapter);
3343 if (status)
3344 goto ctrl_clean;
ba343c77 3345 }
6b7c5b94 3346
2243e2e9
SP
3347 /* tell fw we're ready to fire cmds */
3348 status = be_cmd_fw_init(adapter);
6b7c5b94 3349 if (status)
2243e2e9
SP
3350 goto ctrl_clean;
3351
a4b4dfab
AK
3352 status = be_cmd_reset_function(adapter);
3353 if (status)
3354 goto ctrl_clean;
556ae191 3355
2243e2e9
SP
3356 status = be_stats_init(adapter);
3357 if (status)
3358 goto ctrl_clean;
3359
3360 status = be_get_config(adapter);
6b7c5b94
SP
3361 if (status)
3362 goto stats_clean;
6b7c5b94 3363
3abcdeda
SP
3364 be_msix_enable(adapter);
3365
6b7c5b94 3366 INIT_DELAYED_WORK(&adapter->work, be_worker);
6b7c5b94 3367
5fb379ee
SP
3368 status = be_setup(adapter);
3369 if (status)
3abcdeda 3370 goto msix_disable;
2243e2e9 3371
3abcdeda 3372 be_netdev_init(netdev);
6b7c5b94
SP
3373 status = register_netdev(netdev);
3374 if (status != 0)
5fb379ee 3375 goto unsetup;
63a76944 3376 netif_carrier_off(netdev);
6b7c5b94 3377
e6319365 3378 if (be_physfn(adapter) && adapter->sriov_enabled) {
d0381c42
AK
3379 u8 mac_speed;
3380 bool link_up;
3381 u16 vf, lnk_speed;
3382
12f4d0a8
ME
3383 if (!lancer_chip(adapter)) {
3384 status = be_vf_eth_addr_config(adapter);
3385 if (status)
3386 goto unreg_netdev;
3387 }
d0381c42
AK
3388
3389 for (vf = 0; vf < num_vfs; vf++) {
3390 status = be_cmd_link_status_query(adapter, &link_up,
3391 &mac_speed, &lnk_speed, vf + 1);
3392 if (!status)
3393 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3394 else
3395 goto unreg_netdev;
3396 }
e6319365
AK
3397 }
3398
c4ca2374 3399 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
f203af70 3400 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3401 return 0;
3402
e6319365
AK
3403unreg_netdev:
3404 unregister_netdev(netdev);
5fb379ee
SP
3405unsetup:
3406 be_clear(adapter);
3abcdeda
SP
3407msix_disable:
3408 be_msix_disable(adapter);
6b7c5b94
SP
3409stats_clean:
3410 be_stats_cleanup(adapter);
3411ctrl_clean:
3412 be_ctrl_cleanup(adapter);
48f5a191
AK
3413free_vf_cfg:
3414 kfree(adapter->vf_cfg);
6b7c5b94 3415free_netdev:
ba343c77 3416 be_sriov_disable(adapter);
fe6d2a38 3417 free_netdev(netdev);
8d56ff11 3418 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
3419rel_reg:
3420 pci_release_regions(pdev);
3421disable_dev:
3422 pci_disable_device(pdev);
3423do_none:
c4ca2374 3424 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
3425 return status;
3426}
3427
3428static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3429{
3430 struct be_adapter *adapter = pci_get_drvdata(pdev);
3431 struct net_device *netdev = adapter->netdev;
3432
a4ca055f 3433 cancel_delayed_work_sync(&adapter->work);
71d8d1b5
AK
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

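	/* be_clear() in suspend released the queues and interfaces;
	 * be_setup() recreates them before the netdev is reopened below.
	 */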
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR (function-level reset) will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

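	/* Trigger the function reset (the FLR noted above) so the
	 * device stops DMAing before shutdown completes.
	 */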
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

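/*
 * PCI EEH (Extended Error Handling) flow: the PCI core calls
 * ->error_detected() when an I/O error is seen on the slot, then
 * ->slot_reset() once the slot has been reset, and finally ->resume()
 * to let the driver restart traffic.
 */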
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

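	/* A permanent channel failure cannot be fixed by a slot reset;
	 * ask the PCI core to disconnect the device instead.
	 */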
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);
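
/*
 * Example usage (assumption: the module is built as be2net.ko):
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 * An out-of-range rx_frag_size falls back to 2048 with the warning above.
 */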

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);