be2net: fix certain cmd failure logging
drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
106 "NETC"
107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown"
115};
6b7c5b94
SP
116
117static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
118{
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
2b7bcebf
IV
121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
6b7c5b94
SP
123}
124
125static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
129
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
2b7bcebf
IV
134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
6b7c5b94
SP
136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
140}
141
8788fdc2 142static void be_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 143{
8788fdc2 144 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
6b7c5b94
SP
145 u32 reg = ioread32(addr);
146 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 147
cf588477
SP
148 if (adapter->eeh_err)
149 return;
150
5f0b849e 151 if (!enabled && enable)
6b7c5b94 152 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 153 else if (enabled && !enable)
6b7c5b94 154 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 155 else
6b7c5b94 156 return;
5f0b849e 157
6b7c5b94
SP
158 iowrite32(reg, addr);
159}
160
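/* Note on the doorbell helpers below (explanatory comment added here, not in
 * the original file): each one builds a 32-bit doorbell word with the ring id
 * in the low bits and the posted/popped count and flags above it, then writes
 * it to the doorbell BAR. The wmb() in the RQ/TXQ notify paths orders the
 * queue-entry writes before the doorbell write, so the device never observes
 * a doorbell ahead of the descriptors it advertises.
 */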
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

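/* Note (added): for event and completion queues the doorbell also returns
 * credit; num_popped tells the hardware how many entries the driver has
 * consumed, and the rearm bit requests another interrupt when new entries
 * arrive. Leaving rearm clear, as a poll loop typically does while it keeps
 * polling, keeps the queue serviced but interrupt-quiet.
 */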
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

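/* Note (added, refers to be_mac_addr_set() above): a unicast MAC change is a
 * delete-then-add of the pmac filter entry in hardware; if the delete fails
 * the old address is kept and the error returned. On VFs the filters are
 * owned by the PF, so only netdev->dev_addr is updated.
 */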
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop =
		port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_cmd_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames =
		make_64bit_val(pport_stats->rx_pause_frames_hi,
			       pport_stats->rx_pause_frames_lo);
	drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
					     pport_stats->rx_crc_errors_lo);
	drvs->rx_control_frames =
		make_64bit_val(pport_stats->rx_control_frames_hi,
			       pport_stats->rx_control_frames_lo);
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long =
		make_64bit_val(pport_stats->rx_frames_too_long_hi,
			       pport_stats->rx_frames_too_long_lo);
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		make_64bit_val(pport_stats->rx_symbol_errors_hi,
			       pport_stats->rx_symbol_errors_lo);
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
					      pport_stats->tx_pause_frames_lo);
	drvs->tx_controlframes =
		make_64bit_val(pport_stats->tx_control_frames_hi,
			       pport_stats->tx_control_frames_lo);
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_no_pbuf = 0;
	drvs->rx_drops_no_txpb = 0;
	drvs->rx_drops_no_erx_descr = 0;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
						 pport_stats->num_forwards_lo);
	drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
					    pport_stats->rx_drops_mtu_lo);
	drvs->rx_drops_no_tpre_descr = 0;
	drvs->rx_drops_too_many_frags =
		make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
			       pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}
}

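/* Note (added): BE3 and Lancer both identify as BE_GEN3 but return stats in
 * different layouts; BE3 uses the v1 rxf/port structures, Lancer a pport
 * stats command, and BE2 the v0 structures. The populate_*() helpers above
 * normalize all three into the generation-independent be_drv_stats.
 */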
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		pkts += rx_stats(rxo)->rx_pkts;
		bytes += rx_stats(rxo)->rx_bytes;
		mcast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			if (!(lancer_chip(adapter))) {
				struct be_erx_stats_v1 *erx =
					be_erx_stats_from_cmd(adapter);
				drops += erx->rx_drops_no_fragments[rxo->q.id];
			}
		} else {
			struct be_erx_stats_v0 *erx =
				be_erx_stats_from_cmd(adapter);
			drops += erx->rx_drops_no_fragments[rxo->q.id];
		}
	}
	dev_stats->rx_packets = pkts;
	dev_stats->rx_bytes = bytes;
	dev_stats->multicast = mcast;
	dev_stats->rx_dropped = drops;

	pkts = bytes = 0;
	for_all_tx_queues(adapter, txo, i) {
		pkts += tx_stats(txo)->be_tx_pkts;
		bytes += tx_stats(txo)->be_tx_bytes;
	}
	dev_stats->tx_packets = pkts;
	dev_stats->tx_bytes = bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3; /* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul); /* Mbits/sec */

	return rate;
}

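/* Worked example for be_calc_rate() (added, not in the original source):
 * bytes = 250,000,000 transferred over ticks = 2 * HZ (two seconds):
 *   rate = 250e6 / 2   -> 125e6 bytes/sec
 *   rate <<= 3         -> 1e9 bits/sec
 *   rate /= 1e6        -> 1000 Mbits/sec
 * ticks < HZ would make ticks/HZ zero; the callers only invoke this after
 * roughly two seconds have elapsed, which avoids that division by zero.
 */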
static void be_tx_rate_update(struct be_tx_obj *txo)
{
	struct be_tx_stats *stats = tx_stats(txo);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						 - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

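/* Example for wrb_cnt_for_skb() above (added): an skb with linear data and
 * three page frags needs 1 + 3 data WRBs plus the header WRB, so cnt = 5;
 * since 5 is odd and the chip is not Lancer, a dummy WRB is appended
 * (cnt = 6, *dummy = true). Lancer has no even-count requirement in this
 * code path and never needs the dummy.
 */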
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

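/* Note (added, refers to make_tx_wrbs() above): if any dma_map_* call fails,
 * the function resets txq->head to map_head and walks the WRBs it already
 * filled, unmapping each one; only the first WRB (the skb head, if it was
 * mapped) is undone with dma_unmap_single(), the rest with dma_unmap_page().
 * Returning 0 makes be_xmit() roll the queue head back and drop the skb.
 */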
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
			&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

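/* Note (added, refers to skb_fill_rx_data() above): because each big page is
 * carved into rx_frag_size chunks, consecutive completion frags that come
 * from the same physical page are folded into a single skb frag slot; a
 * fresh page is detected by page_offset == 0, and the duplicate page
 * references taken at post time are dropped with put_page().
 */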
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
			       rxcp->vlan_tag);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

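/* Note (added, refers to be_rx_compl_get() above): RX completions are
 * consumed by polling the valid bit in place; the rmb() keeps the CPU from
 * reading the rest of the entry before the valid bit it just observed. The
 * entry is zeroed after parsing so the next trip around the ring cannot
 * mistake a stale completion for a new one.
 */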
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

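/* Note (added, refers to be_post_rx_frags() above): one big_page_size page
 * is mapped once and handed out rx_frag_size bytes at a time, taking an
 * extra page reference per fragment; last_page_user marks the fragment
 * whose completion should trigger the dma_unmap_page() done in
 * get_rx_page_info().
 */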
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

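/* Note (added, refers to be_tx_compl_clean() above): the loop polls for up
 * to roughly 200 ms (200 iterations with mdelay(1)) for outstanding TX
 * completions; anything still pending after that is reclaimed by hand,
 * walking each posted skb's WRBs via wrb_cnt_for_skb() so that txq->used
 * drains to zero before the queues are destroyed.
 */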
5fb379ee
SP
1625static void be_mcc_queues_destroy(struct be_adapter *adapter)
1626{
1627 struct be_queue_info *q;
5fb379ee 1628
8788fdc2 1629 q = &adapter->mcc_obj.q;
5fb379ee 1630 if (q->created)
8788fdc2 1631 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1632 be_queue_free(adapter, q);
1633
8788fdc2 1634 q = &adapter->mcc_obj.cq;
5fb379ee 1635 if (q->created)
8788fdc2 1636 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1637 be_queue_free(adapter, q);
1638}
1639
1640/* Must be called only after TX qs are created as MCC shares TX EQ */
1641static int be_mcc_queues_create(struct be_adapter *adapter)
1642{
1643 struct be_queue_info *q, *cq;
5fb379ee
SP
1644
1645 /* Alloc MCC compl queue */
8788fdc2 1646 cq = &adapter->mcc_obj.cq;
5fb379ee 1647 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1648 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1649 goto err;
1650
1651 /* Ask BE to create MCC compl queue; share TX's eq */
8788fdc2 1652 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
5fb379ee
SP
1653 goto mcc_cq_free;
1654
1655 /* Alloc MCC queue */
8788fdc2 1656 q = &adapter->mcc_obj.q;
5fb379ee
SP
1657 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1658 goto mcc_cq_destroy;
1659
1660 /* Ask BE to create MCC queue */
8788fdc2 1661 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1662 goto mcc_q_free;
1663
1664 return 0;
1665
1666mcc_q_free:
1667 be_queue_free(adapter, q);
1668mcc_cq_destroy:
8788fdc2 1669 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1670mcc_cq_free:
1671 be_queue_free(adapter, cq);
1672err:
1673 return -1;
1674}
1675
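/*
 * Note: teardown below mirrors creation in reverse: each work queue is
 * destroyed before its CQ, and the shared event queue goes last, after
 * be_eq_clean() has consumed any events still latched in it.
 */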
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
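/*
 * Note: RX queue 0 is the default (non-RSS) queue. Queues 1..N are created
 * with the rss-enable flag, and the rss_id each one reports is then written
 * into the RSS indirection table by be_cmd_rss_config().
 */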
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues\n", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
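/*
 * Note: in INTx mode each event queue owns one bit of the CEV_ISR0
 * register, at the bit position given by the eq_idx assigned at creation;
 * hence the (1 << eq_idx) & isr tests below. Lancer has no such register,
 * so events are detected by peeking at the EQ ring instead.
 */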
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}
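/*
 * Note: be_poll_rx() follows the usual NAPI contract: it consumes up to
 * 'budget' completions; if fewer were available it calls napi_complete()
 * and re-arms the CQ with interrupts enabled, otherwise it leaves
 * interrupts off and lets NAPI schedule another poll.
 */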
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			adapter->drv_stats.be_tx_events++;
			txo->stats.be_tx_compl += tx_compl;
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	return 1;
}
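/*
 * Note: the UE (unrecoverable error) check below first clears any bits
 * suppressed by the per-bit LOW/HI mask registers, then shifts through the
 * surviving bits one at a time so bit i can be reported by name via
 * ue_status_low_desc[i] / ue_status_hi_desc[i].
 */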
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
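/*
 * Note: be_worker() is the once-a-second housekeeping tick: it checks for
 * UEs, kicks off the asynchronous stats command, updates tx/rx rate
 * samples and the adaptive EQ delay, and reposts RX buffers on any ring
 * that ran dry. While the interface is down it only reaps MCC completions.
 */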
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_tx_queues(adapter, txo, i)
		be_tx_rate_update(txo);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}
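/*
 * Note: with the pci_enable_msix() API of this kernel, a positive return
 * value is the number of vectors actually available; the fallback branch
 * below retries with that smaller count before giving up and leaving the
 * device in INTx mode.
 */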
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
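/*
 * Note: the num_vfs module parameter is clamped to the TotalVFs value
 * advertised in the PCIe SR-IOV capability before pci_enable_sriov() is
 * called, so requesting too many VFs is reported and reduced rather than
 * failing the enable.
 */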
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
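/*
 * Note: ordering in be_close() matters: async MCC processing is stopped
 * first, then NAPI, then every IRQ vector is synchronized before being
 * unregistered, and finally be_tx_compl_clean() drains TX so that all
 * outstanding skbs are freed before the queues are destroyed.
 */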
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
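/*
 * Note: Wake-on-LAN is armed by programming the interface MAC as the
 * magic-packet filter (plus the PCICFG_PM_CONTROL write); disarming passes
 * an all-zero MAC, which clears the filter, and the D3hot/D3cold wake
 * sources are toggled to match.
 */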
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}
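/*
 * Note: be_setup() creates the function's interface first (with wider
 * capability flags on the PF, and one interface per VF when SR-IOV is on),
 * then the queues in dependency order TX -> RX -> MCC; the goto chain at
 * the bottom unwinds exactly that order in reverse on failure.
 */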
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
					BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}


#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
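/*
 * Note: be_flash_data() feeds each UFI component to the firmware in 32KB
 * slices; all but the last slice of a component are sent with
 * FLASHROM_OPER_SAVE (staged) and the final one with FLASHROM_OPER_FLASH,
 * which commits the staged data to flash.
 */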
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}
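/*
 * Note: Lancer firmware is downloaded with the WRITE_OBJECT command in
 * 32KB chunks targeting the "/prg" object; the zero-length write issued at
 * the end offset commits the image.
 */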
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}
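/*
 * Note: the BAR layout is generation specific: Lancer only needs the
 * doorbell BAR (0); BE2 keeps the pcicfg shadow in BAR 1 while BE3 uses
 * BAR 0; and a VF has no pcicfg BAR at all, reaching that space at a fixed
 * offset from its doorbell mapping instead.
 */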
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}


static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer */
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);

	if ((num_vfs && adapter->sriov_enabled) ||
		(adapter->function_mode & 0x400) ||
		lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}
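/*
 * Note: probe order below is dictated by the firmware: map BARs and the
 * mailbox (be_ctrl_init), wait for POST, issue FW_INIT and a function
 * reset, and only then query the configuration and create queues; the
 * error labels unwind these stages in reverse.
 */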
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg) {
			status = -ENOMEM;
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3454static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3455{
3456 struct be_adapter *adapter = pci_get_drvdata(pdev);
3457 struct net_device *netdev = adapter->netdev;
3458
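	/* Quiesce before powering down: stop the worker, arm WoL if
	 * configured, detach and close the netdev, then save PCI state.
	 */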
a4ca055f 3459 cancel_delayed_work_sync(&adapter->work);
71d8d1b5
AK
3460 if (adapter->wol)
3461 be_setup_wol(adapter, true);
3462
6b7c5b94
SP
3463 netif_device_detach(netdev);
3464 if (netif_running(netdev)) {
3465 rtnl_lock();
3466 be_close(netdev);
3467 rtnl_unlock();
3468 }
9e90c961 3469 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
9b0365f1 3470 be_clear(adapter);
6b7c5b94 3471
a4ca055f 3472 be_msix_disable(adapter);
6b7c5b94
SP
3473 pci_save_state(pdev);
3474 pci_disable_device(pdev);
3475 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3476 return 0;
3477}
3478
3479static int be_resume(struct pci_dev *pdev)
3480{
3481 int status = 0;
3482 struct be_adapter *adapter = pci_get_drvdata(pdev);
3483 struct net_device *netdev = adapter->netdev;
3484
3485 netif_device_detach(netdev);
3486
3487 status = pci_enable_device(pdev);
3488 if (status)
3489 return status;
3490
3491 pci_set_power_state(pdev, 0);
3492 pci_restore_state(pdev);
3493
a4ca055f 3494 be_msix_enable(adapter);
2243e2e9
SP
3495 /* tell fw we're ready to fire cmds */
3496 status = be_cmd_fw_init(adapter);
3497 if (status)
3498 return status;
3499
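	/* Re-create queues and re-apply config; note that be_setup()'s
	 * return value is not checked on this path.
	 */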
9b0365f1 3500 be_setup(adapter);
6b7c5b94
SP
3501 if (netif_running(netdev)) {
3502 rtnl_lock();
3503 be_open(netdev);
3504 rtnl_unlock();
3505 }
3506 netif_device_attach(netdev);
71d8d1b5
AK
3507
3508 if (adapter->wol)
3509 be_setup_wol(adapter, false);
a4ca055f
AK
3510
3511 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3512 return 0;
3513}
3514
82456b03
SP
3515/*
3516 * An FLR will stop BE from DMAing any data.
3517 */
3518static void be_shutdown(struct pci_dev *pdev)
3519{
3520 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3521
2d5d4154
AK
3522 if (!adapter)
3523 return;
82456b03 3524
0f4a6828 3525 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3526
2d5d4154 3527 netif_device_detach(adapter->netdev);
82456b03 3528
82456b03
SP
3529 if (adapter->wol)
3530 be_setup_wol(adapter, true);
3531
57841869
AK
3532 be_cmd_reset_function(adapter);
3533
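	/* The function reset stops any in-flight DMA (see the FLR note
	 * above) before the device is disabled.
	 */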
82456b03 3534 pci_disable_device(pdev);
82456b03
SP
3535}
3536
cf588477
SP
3537static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3538 pci_channel_state_t state)
3539{
3540 struct be_adapter *adapter = pci_get_drvdata(pdev);
3541 struct net_device *netdev = adapter->netdev;
3542
3543 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3544
3545 adapter->eeh_err = true;
3546
3547 netif_device_detach(netdev);
3548
3549 if (netif_running(netdev)) {
3550 rtnl_lock();
3551 be_close(netdev);
3552 rtnl_unlock();
3553 }
3554 be_clear(adapter);
3555
3556 if (state == pci_channel_io_perm_failure)
3557 return PCI_ERS_RESULT_DISCONNECT;
3558
3559 pci_disable_device(pdev);
3560
3561 return PCI_ERS_RESULT_NEED_RESET;
3562}
3563
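/* Invoked by the PCI core after it resets the slot (following the
 * PCI_ERS_RESULT_NEED_RESET above); POST confirms the card and fw are
 * ready before recovery proceeds to be_eeh_resume().
 */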
3564static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3565{
3566 struct be_adapter *adapter = pci_get_drvdata(pdev);
3567 int status;
3568
3569 dev_info(&adapter->pdev->dev, "EEH reset\n");
3570 adapter->eeh_err = false;
3571
3572 status = pci_enable_device(pdev);
3573 if (status)
3574 return PCI_ERS_RESULT_DISCONNECT;
3575
3576 pci_set_master(pdev);
3577 pci_set_power_state(pdev, 0);
3578 pci_restore_state(pdev);
3579
3580 /* Check if card is ok and fw is ready */
3581 status = be_cmd_POST(adapter);
3582 if (status)
3583 return PCI_ERS_RESULT_DISCONNECT;
3584
3585 return PCI_ERS_RESULT_RECOVERED;
3586}
3587
3588static void be_eeh_resume(struct pci_dev *pdev)
3589{
3590 int status = 0;
3591 struct be_adapter *adapter = pci_get_drvdata(pdev);
3592 struct net_device *netdev = adapter->netdev;
3593
3594 dev_info(&adapter->pdev->dev, "EEH resume\n");
3595
3596 pci_save_state(pdev);
3597
3598 /* tell fw we're ready to fire cmds */
3599 status = be_cmd_fw_init(adapter);
3600 if (status)
3601 goto err;
3602
3603 status = be_setup(adapter);
3604 if (status)
3605 goto err;
3606
3607 if (netif_running(netdev)) {
3608 status = be_open(netdev);
3609 if (status)
3610 goto err;
3611 }
3612 netif_device_attach(netdev);
3613 return;
3614err:
3615 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
3616}
3617
3618static struct pci_error_handlers be_eeh_handlers = {
3619 .error_detected = be_eeh_err_detected,
3620 .slot_reset = be_eeh_reset,
3621 .resume = be_eeh_resume,
3622};
3623
6b7c5b94
SP
3624static struct pci_driver be_driver = {
3625 .name = DRV_NAME,
3626 .id_table = be_dev_ids,
3627 .probe = be_probe,
3628 .remove = be_remove,
3629 .suspend = be_suspend,
cf588477 3630 .resume = be_resume,
82456b03 3631 .shutdown = be_shutdown,
cf588477 3632 .err_handler = &be_eeh_handlers
6b7c5b94
SP
3633};
3634
3635static int __init be_init_module(void)
3636{
8e95a202
JP
3637 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3638 rx_frag_size != 2048) {
6b7c5b94
SP
3639 printk(KERN_WARNING DRV_NAME
3640 " : Module param rx_frag_size must be 2048/4096/8192."
3641 " Using 2048\n");
3642 rx_frag_size = 2048;
3643 }
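	/* Hypothetical usage: "modprobe be2net rx_frag_size=4096 num_vfs=4"
	 * selects 4K rx fragments and requests 4 VFs; any other
	 * rx_frag_size value falls back to 2048 with the warning above.
	 */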
6b7c5b94
SP
3644
3645 return pci_register_driver(&be_driver);
3646}
3647module_init(be_init_module);
3648
3649static void __exit be_exit_module(void)
3650{
3651 pci_unregister_driver(&be_driver);
3652}
3653module_exit(be_exit_module);