/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

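/* Toggle the adapter's host-interrupt enable (HOSTINTR) bit in the MEMBAR
 * control register via PCI config space. Skipped while an EEH error is
 * pending, when config-space accesses are not safe.
 */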
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

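/* Doorbell helpers: tell the adapter how many descriptors were posted to a
 * ring. The wmb() orders the descriptor writes into host memory before the
 * doorbell write that makes them visible to the device.
 */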
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

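/* EQ/CQ doorbells acknowledge num_popped processed entries; going by the
 * bit names, "arm" re-arms the queue's interrupt and "clear_int" clears a
 * pending one.
 */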
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

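/* On a PF, reprogram the MAC by deleting the current pmac entry and adding
 * a new one; VFs only update netdev->dev_addr, as their MAC is programmed
 * by the corresponding PF.
 */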
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

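/* Extend a 16-bit HW counter into a 32-bit accumulator: the low half
 * shadows the latest HW reading while the high half counts wrap-arounds.
 * A reading below the previous low half means the HW counter wrapped, so
 * 65536 is added; e.g. acc = 0x0001FFF0 and val = 0x0005 -> 0x00020005.
 */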
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

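/* Per-queue pkt/byte counters are read under a u64_stats seqcount: the
 * fetch is retried if a writer updated the counters mid-read, keeping the
 * 64-bit values consistent even on 32-bit hosts.
 */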
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

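/* A WRB (work request block) describes one DMA buffer. The count below is
 * one WRB for the linear area (if any), one per page frag, plus the header
 * WRB; pre-Lancer chips need an even total, hence the dummy. For example,
 * an skb with a linear area and two frags needs 1 + 2 + 1 = 4 WRBs; four
 * is even, so no dummy is added.
 */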
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

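/* DMA-map the skb's linear area and page frags and fill one WRB for each
 * mapping; the header WRB is filled last, once the total copied length is
 * known. On a mapping failure the queue head is rewound and everything
 * mapped so far is unmapped.
 */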
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

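/* Adaptive interrupt coalescing: once a second, derive the Rx packet rate
 * and map it to an event-queue delay, eqd = (pps / 110000) << 3, clamped
 * to the EQ's min/max; e.g. ~1.4 Mpps yields 12 << 3 = 96. Rates low
 * enough to give eqd < 10 disable coalescing altogether.
 */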
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

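/* Return the page_info for a completed Rx frag; the compound page backing
 * the frags is DMA-unmapped only when its last user frag is consumed.
 */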
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

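/* Copy fields from the hardware Rx completion descriptor (v1 on BE3-native
 * adapters, v0 otherwise) into the version-independent be_rx_compl_info.
 */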
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

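/* Unmap the WRBs of a completed skb, walking the TX queue tail from the
 * header WRB up to last_index, then free the skb; returns the number of
 * WRBs reclaimed so the caller can decrement txq->used.
 */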
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

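/* Drain pending Rx completions, then free any posted Rx buffers that were
 * never filled; afterwards the queue must be empty and is reset.
 */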
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter,
		struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

6b7c5b94
SP
1617static void be_tx_queues_destroy(struct be_adapter *adapter)
1618{
1619 struct be_queue_info *q;
3c8def97
SP
1620 struct be_tx_obj *txo;
1621 u8 i;
6b7c5b94 1622
3c8def97
SP
1623 for_all_tx_queues(adapter, txo, i) {
1624 q = &txo->q;
1625 if (q->created)
1626 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1627 be_queue_free(adapter, q);
6b7c5b94 1628
3c8def97
SP
1629 q = &txo->cq;
1630 if (q->created)
1631 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1632 be_queue_free(adapter, q);
1633 }
6b7c5b94 1634
859b1e4e
SP
1635 /* Clear any residual events */
1636 be_eq_clean(adapter, &adapter->tx_eq);
1637
6b7c5b94
SP
1638 q = &adapter->tx_eq.q;
1639 if (q->created)
8788fdc2 1640 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
6b7c5b94
SP
1641 be_queue_free(adapter, q);
1642}
1643
dafc0fe3
SP
1644static int be_num_txqs_want(struct be_adapter *adapter)
1645{
1646 if ((num_vfs && adapter->sriov_enabled) ||
752961a1 1647 be_is_mc(adapter) ||
dafc0fe3
SP
1648 lancer_chip(adapter) || !be_physfn(adapter) ||
1649 adapter->generation == BE_GEN2)
1650 return 1;
1651 else
1652 return MAX_TX_QS;
1653}
1654
3c8def97 1655/* One TX event queue is shared by all TX compl qs */
6b7c5b94
SP
1656static int be_tx_queues_create(struct be_adapter *adapter)
1657{
1658 struct be_queue_info *eq, *q, *cq;
3c8def97
SP
1659 struct be_tx_obj *txo;
1660 u8 i;
6b7c5b94 1661
dafc0fe3
SP
1662 adapter->num_tx_qs = be_num_txqs_want(adapter);
1663 if (adapter->num_tx_qs != MAX_TX_QS)
1664 netif_set_real_num_tx_queues(adapter->netdev,
1665 adapter->num_tx_qs);
1666
6b7c5b94
SP
1667 adapter->tx_eq.max_eqd = 0;
1668 adapter->tx_eq.min_eqd = 0;
1669 adapter->tx_eq.cur_eqd = 96;
1670 adapter->tx_eq.enable_aic = false;
3c8def97 1671
6b7c5b94 1672 eq = &adapter->tx_eq.q;
3c8def97
SP
1673 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1674 sizeof(struct be_eq_entry)))
6b7c5b94
SP
1675 return -1;
1676
8788fdc2 1677 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
3c8def97 1678 goto err;
ecd62107 1679 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1680
3c8def97
SP
1681 for_all_tx_queues(adapter, txo, i) {
1682 cq = &txo->cq;
1683 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
6b7c5b94 1684 sizeof(struct be_eth_tx_compl)))
3c8def97 1685 goto err;
6b7c5b94 1686
3c8def97
SP
1687 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1688 goto err;
6b7c5b94 1689
3c8def97
SP
1690 q = &txo->q;
1691 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1692 sizeof(struct be_eth_wrb)))
1693 goto err;
6b7c5b94 1694
3c8def97
SP
1695 if (be_cmd_txq_create(adapter, q, cq))
1696 goto err;
1697 }
6b7c5b94
SP
1698 return 0;
1699
3c8def97
SP
1700err:
1701 be_tx_queues_destroy(adapter);
6b7c5b94
SP
1702 return -1;
1703}
1704
1705static void be_rx_queues_destroy(struct be_adapter *adapter)
1706{
1707 struct be_queue_info *q;
3abcdeda
SP
1708 struct be_rx_obj *rxo;
1709 int i;
1710
1711 for_all_rx_queues(adapter, rxo, i) {
482c9e79 1712 be_queue_free(adapter, &rxo->q);
3abcdeda
SP
1713
1714 q = &rxo->cq;
1715 if (q->created)
1716 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1717 be_queue_free(adapter, q);
1718
3abcdeda 1719 q = &rxo->rx_eq.q;
482c9e79 1720 if (q->created)
3abcdeda 1721 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
3abcdeda 1722 be_queue_free(adapter, q);
6b7c5b94 1723 }
6b7c5b94
SP
1724}
1725
ac6a0c4a
SP
1726static u32 be_num_rxqs_want(struct be_adapter *adapter)
1727{
c814fd36 1728 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
752961a1
SP
1729 !adapter->sriov_enabled && be_physfn(adapter) &&
1730 !be_is_mc(adapter)) {
ac6a0c4a
SP
1731 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1732 } else {
1733 dev_warn(&adapter->pdev->dev,
1734 "No support for multiple RX queues\n");
1735 return 1;
1736 }
1737}
1738
6b7c5b94
SP
1739static int be_rx_queues_create(struct be_adapter *adapter)
1740{
1741 struct be_queue_info *eq, *q, *cq;
3abcdeda
SP
1742 struct be_rx_obj *rxo;
1743 int rc, i;
6b7c5b94 1744
ac6a0c4a
SP
1745 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1746 msix_enabled(adapter) ?
1747 adapter->num_msix_vec - 1 : 1);
1748 if (adapter->num_rx_qs != MAX_RX_QS)
1749 dev_warn(&adapter->pdev->dev,
1750 "Can create only %d RX queues", adapter->num_rx_qs);
1751
6b7c5b94 1752 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
1753 for_all_rx_queues(adapter, rxo, i) {
1754 rxo->adapter = adapter;
1755 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1756 rxo->rx_eq.enable_aic = true;
1757
1758 /* EQ */
1759 eq = &rxo->rx_eq.q;
1760 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1761 sizeof(struct be_eq_entry));
1762 if (rc)
1763 goto err;
1764
1765 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1766 if (rc)
1767 goto err;
1768
ecd62107 1769 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1770
3abcdeda
SP
1771 /* CQ */
1772 cq = &rxo->cq;
1773 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1774 sizeof(struct be_eth_rx_compl));
1775 if (rc)
1776 goto err;
1777
1778 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1779 if (rc)
1780 goto err;
482c9e79
SP
1781
1782 /* Rx Q - will be created in be_open() */
3abcdeda
SP
1783 q = &rxo->q;
1784 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1785 sizeof(struct be_eth_rx_d));
1786 if (rc)
1787 goto err;
1788
3abcdeda 1789 }
6b7c5b94
SP
1790
1791 return 0;
3abcdeda
SP
1792err:
1793 be_rx_queues_destroy(adapter);
1794 return -1;
6b7c5b94 1795}
6b7c5b94 1796
fe6d2a38 1797static bool event_peek(struct be_eq_obj *eq_obj)
b628bde2 1798{
fe6d2a38
SP
1799 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1800 if (!eqe->evt)
1801 return false;
1802 else
1803 return true;
b628bde2
SP
1804}
1805
6b7c5b94
SP
1806static irqreturn_t be_intx(int irq, void *dev)
1807{
1808 struct be_adapter *adapter = dev;
3abcdeda 1809 struct be_rx_obj *rxo;
fe6d2a38 1810 int isr, i, tx = 0 , rx = 0;
6b7c5b94 1811
fe6d2a38
SP
1812 if (lancer_chip(adapter)) {
1813 if (event_peek(&adapter->tx_eq))
3c8def97 1814 tx = event_handle(adapter, &adapter->tx_eq, false);
fe6d2a38
SP
1815 for_all_rx_queues(adapter, rxo, i) {
1816 if (event_peek(&rxo->rx_eq))
3c8def97 1817 rx |= event_handle(adapter, &rxo->rx_eq, true);
fe6d2a38 1818 }
6b7c5b94 1819
fe6d2a38
SP
1820 if (!(tx || rx))
1821 return IRQ_NONE;
3abcdeda 1822
fe6d2a38
SP
1823 } else {
1824 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1825 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1826 if (!isr)
1827 return IRQ_NONE;
1828
ecd62107 1829 if ((1 << adapter->tx_eq.eq_idx & isr))
3c8def97 1830 event_handle(adapter, &adapter->tx_eq, false);
fe6d2a38
SP
1831
1832 for_all_rx_queues(adapter, rxo, i) {
ecd62107 1833 if ((1 << rxo->rx_eq.eq_idx & isr))
3c8def97 1834 event_handle(adapter, &rxo->rx_eq, true);
fe6d2a38 1835 }
3abcdeda 1836 }
c001c213 1837
8788fdc2 1838 return IRQ_HANDLED;
6b7c5b94
SP
1839}
1840
1841static irqreturn_t be_msix_rx(int irq, void *dev)
1842{
3abcdeda
SP
1843 struct be_rx_obj *rxo = dev;
1844 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1845
3c8def97 1846 event_handle(adapter, &rxo->rx_eq, true);
6b7c5b94
SP
1847
1848 return IRQ_HANDLED;
1849}
1850
5fb379ee 1851static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
6b7c5b94
SP
1852{
1853 struct be_adapter *adapter = dev;
1854
3c8def97 1855 event_handle(adapter, &adapter->tx_eq, false);
6b7c5b94
SP
1856
1857 return IRQ_HANDLED;
1858}
1859
2e588f84 1860static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1861{
2e588f84 1862 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
1863}
1864
49b05221 1865static int be_poll_rx(struct napi_struct *napi, int budget)
6b7c5b94
SP
1866{
1867 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
3abcdeda
SP
1868 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1869 struct be_adapter *adapter = rxo->adapter;
1870 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1871 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1872 u32 work_done;
1873
ac124ff9 1874 rx_stats(rxo)->rx_polls++;
6b7c5b94 1875 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1876 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
1877 if (!rxcp)
1878 break;
1879
12004ae9
SP
1880 /* Is it a flush compl that has no data */
1881 if (unlikely(rxcp->num_rcvd == 0))
1882 goto loop_continue;
1883
1884 		/* Discard compl with partial DMA (Lancer B0) */
1885 if (unlikely(!rxcp->pkt_size)) {
1886 be_rx_compl_discard(adapter, rxo, rxcp);
1887 goto loop_continue;
1888 }
1889
1890 /* On BE drop pkts that arrive due to imperfect filtering in
1891 		 * promiscuous mode on some SKUs
1892 */
1893 if (unlikely(rxcp->port != adapter->port_num &&
1894 !lancer_chip(adapter))) {
009dd872 1895 be_rx_compl_discard(adapter, rxo, rxcp);
12004ae9 1896 goto loop_continue;
64642811 1897 }
009dd872 1898
12004ae9
SP
1899 if (do_gro(rxcp))
1900 be_rx_compl_process_gro(adapter, rxo, rxcp);
1901 else
1902 be_rx_compl_process(adapter, rxo, rxcp);
1903loop_continue:
2e588f84 1904 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
1905 }
1906
6b7c5b94 1907 /* Refill the queue */
857c9905 1908 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1829b086 1909 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94
SP
1910
1911 /* All consumed */
1912 if (work_done < budget) {
1913 napi_complete(napi);
8788fdc2 1914 be_cq_notify(adapter, rx_cq->id, true, work_done);
6b7c5b94
SP
1915 } else {
1916 /* More to be consumed; continue with interrupts disabled */
8788fdc2 1917 be_cq_notify(adapter, rx_cq->id, false, work_done);
6b7c5b94
SP
1918 }
1919 return work_done;
1920}
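/*
 * This follows the usual NAPI contract: returning work_done == budget
 * without calling napi_complete() keeps the instance scheduled for
 * another poll, while the work_done < budget path completes NAPI and
 * re-arms the CQ (be_cq_notify() with arm == true) so that the next
 * completion raises an interrupt again.
 */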
1921
f31e50a8
SP
1922 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1923 * For TX/MCC we don't honour budget; consume everything
1924 */
1925static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
6b7c5b94 1926{
f31e50a8
SP
1927 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1928 struct be_adapter *adapter =
1929 container_of(tx_eq, struct be_adapter, tx_eq);
3c8def97 1930 struct be_tx_obj *txo;
6b7c5b94 1931 struct be_eth_tx_compl *txcp;
3c8def97
SP
1932 int tx_compl, mcc_compl, status = 0;
1933 u8 i;
1934 u16 num_wrbs;
1935
1936 for_all_tx_queues(adapter, txo, i) {
1937 tx_compl = 0;
1938 num_wrbs = 0;
1939 while ((txcp = be_tx_compl_get(&txo->cq))) {
1940 num_wrbs += be_tx_compl_process(adapter, txo,
1941 AMAP_GET_BITS(struct amap_eth_tx_compl,
1942 wrb_index, txcp));
1943 tx_compl++;
1944 }
1945 if (tx_compl) {
1946 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1947
1948 atomic_sub(num_wrbs, &txo->q.used);
6b7c5b94 1949
3c8def97
SP
1950 /* As Tx wrbs have been freed up, wake up netdev queue
1951 * if it was stopped due to lack of tx wrbs. */
1952 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1953 atomic_read(&txo->q.used) < txo->q.len / 2) {
1954 netif_wake_subqueue(adapter->netdev, i);
1955 }
1956
ab1594e9 1957 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
ac124ff9 1958 tx_stats(txo)->tx_compl += tx_compl;
ab1594e9 1959 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3c8def97 1960 }
6b7c5b94
SP
1961 }
1962
f31e50a8
SP
1963 mcc_compl = be_process_mcc(adapter, &status);
1964
f31e50a8
SP
1965 if (mcc_compl) {
1966 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1967 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1968 }
1969
3c8def97 1970 napi_complete(napi);
6b7c5b94 1971
3c8def97 1972 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
ab1594e9 1973 adapter->drv_stats.tx_events++;
6b7c5b94
SP
1974 return 1;
1975}
1976
d053de91 1977void be_detect_dump_ue(struct be_adapter *adapter)
7c185276
AK
1978{
1979 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1980 u32 i;
1981
1982 pci_read_config_dword(adapter->pdev,
1983 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1984 pci_read_config_dword(adapter->pdev,
1985 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1986 pci_read_config_dword(adapter->pdev,
1987 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1988 pci_read_config_dword(adapter->pdev,
1989 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1990
1991 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1992 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1993
d053de91
AK
1994 if (ue_status_lo || ue_status_hi) {
1995 adapter->ue_detected = true;
7acc2087 1996 adapter->eeh_err = true;
d053de91
AK
1997 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1998 }
1999
7c185276
AK
2000 if (ue_status_lo) {
2001 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2002 if (ue_status_lo & 1)
2003 dev_err(&adapter->pdev->dev,
2004 "UE: %s bit set\n", ue_status_low_desc[i]);
2005 }
2006 }
2007 if (ue_status_hi) {
2008 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2009 if (ue_status_hi & 1)
2010 dev_err(&adapter->pdev->dev,
2011 "UE: %s bit set\n", ue_status_hi_desc[i]);
2012 }
2013 }
2014
2015}
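/*
 * The bit-walk above reports one descriptor per unmasked UE bit; for
 * example ue_status_lo == 0x12 has bits 1 and 4 set and would print
 * ue_status_low_desc[1] and ue_status_low_desc[4]. Bits set in the
 * PCICFG_UE_STATUS_{LOW,HI}_MASK registers are cleared first and are
 * never reported.
 */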
2016
ea1dae11
SP
2017static void be_worker(struct work_struct *work)
2018{
2019 struct be_adapter *adapter =
2020 container_of(work, struct be_adapter, work.work);
3abcdeda
SP
2021 struct be_rx_obj *rxo;
2022 int i;
ea1dae11 2023
16da8250
SP
2024 if (!adapter->ue_detected && !lancer_chip(adapter))
2025 be_detect_dump_ue(adapter);
2026
f203af70
SK
2027 /* when interrupts are not yet enabled, just reap any pending
2028 * mcc completions */
2029 if (!netif_running(adapter->netdev)) {
2030 int mcc_compl, status = 0;
2031
2032 mcc_compl = be_process_mcc(adapter, &status);
2033
2034 if (mcc_compl) {
2035 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2036 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2037 }
9b037f38 2038
f203af70
SK
2039 goto reschedule;
2040 }
2041
005d5696
SX
2042 if (!adapter->stats_cmd_sent) {
2043 if (lancer_chip(adapter))
2044 lancer_cmd_get_pport_stats(adapter,
2045 &adapter->stats_cmd);
2046 else
2047 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2048 }
3c8def97 2049
3abcdeda 2050 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2051 be_rx_eqd_update(adapter, rxo);
2052
2053 if (rxo->rx_post_starved) {
2054 rxo->rx_post_starved = false;
1829b086 2055 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda 2056 }
ea1dae11
SP
2057 }
2058
f203af70 2059reschedule:
e74fbd03 2060 adapter->work_counter++;
ea1dae11
SP
2061 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2062}
2063
8d56ff11
SP
2064static void be_msix_disable(struct be_adapter *adapter)
2065{
ac6a0c4a 2066 if (msix_enabled(adapter)) {
8d56ff11 2067 pci_disable_msix(adapter->pdev);
ac6a0c4a 2068 adapter->num_msix_vec = 0;
3abcdeda
SP
2069 }
2070}
2071
6b7c5b94
SP
2072static void be_msix_enable(struct be_adapter *adapter)
2073{
3abcdeda 2074#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
ac6a0c4a 2075 int i, status, num_vec;
6b7c5b94 2076
ac6a0c4a 2077 num_vec = be_num_rxqs_want(adapter) + 1;
3abcdeda 2078
ac6a0c4a 2079 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2080 adapter->msix_entries[i].entry = i;
2081
ac6a0c4a 2082 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2083 if (status == 0) {
2084 goto done;
2085 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2086 num_vec = status;
3abcdeda 2087 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2088 num_vec) == 0)
3abcdeda 2089 goto done;
3abcdeda
SP
2090 }
2091 return;
2092done:
ac6a0c4a
SP
2093 adapter->num_msix_vec = num_vec;
2094 return;
6b7c5b94
SP
2095}
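/*
 * Note on the retry above: the legacy pci_enable_msix() API returns 0
 * when all num_vec vectors are granted, a positive count of the vectors
 * actually available (which is why the code retries once with
 * num_vec = status), or a negative errno on hard failure.
 */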
2096
f9449ab7 2097static int be_sriov_enable(struct be_adapter *adapter)
ba343c77 2098{
344dbf10 2099 be_check_sriov_fn_type(adapter);
6dedec81 2100#ifdef CONFIG_PCI_IOV
ba343c77 2101 if (be_physfn(adapter) && num_vfs) {
81be8f0a
AK
2102 int status, pos;
2103 u16 nvfs;
2104
2105 pos = pci_find_ext_capability(adapter->pdev,
2106 PCI_EXT_CAP_ID_SRIOV);
2107 pci_read_config_word(adapter->pdev,
2108 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2109
2110 if (num_vfs > nvfs) {
2111 dev_info(&adapter->pdev->dev,
2112 "Device supports %d VFs and not %d\n",
2113 nvfs, num_vfs);
2114 num_vfs = nvfs;
2115 }
6dedec81 2116
ba343c77
SB
2117 status = pci_enable_sriov(adapter->pdev, num_vfs);
2118 adapter->sriov_enabled = status ? false : true;
f9449ab7
SP
2119
2120 if (adapter->sriov_enabled) {
2121 adapter->vf_cfg = kcalloc(num_vfs,
2122 sizeof(struct be_vf_cfg),
2123 GFP_KERNEL);
2124 if (!adapter->vf_cfg)
2125 return -ENOMEM;
2126 }
ba343c77
SB
2127 }
2128#endif
f9449ab7 2129 return 0;
ba343c77
SB
2130}
2131
2132static void be_sriov_disable(struct be_adapter *adapter)
2133{
2134#ifdef CONFIG_PCI_IOV
2135 if (adapter->sriov_enabled) {
2136 pci_disable_sriov(adapter->pdev);
f9449ab7 2137 kfree(adapter->vf_cfg);
ba343c77
SB
2138 adapter->sriov_enabled = false;
2139 }
2140#endif
2141}
2142
fe6d2a38
SP
2143static inline int be_msix_vec_get(struct be_adapter *adapter,
2144 struct be_eq_obj *eq_obj)
6b7c5b94 2145{
ecd62107 2146 return adapter->msix_entries[eq_obj->eq_idx].vector;
6b7c5b94
SP
2147}
2148
b628bde2
SP
2149static int be_request_irq(struct be_adapter *adapter,
2150 struct be_eq_obj *eq_obj,
3abcdeda 2151 void *handler, char *desc, void *context)
6b7c5b94
SP
2152{
2153 struct net_device *netdev = adapter->netdev;
b628bde2
SP
2154 int vec;
2155
2156 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 2157 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2158 return request_irq(vec, handler, 0, eq_obj->desc, context);
b628bde2
SP
2159}
2160
3abcdeda
SP
2161static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2162 void *context)
b628bde2 2163{
fe6d2a38 2164 int vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2165 free_irq(vec, context);
b628bde2 2166}
6b7c5b94 2167
b628bde2
SP
2168static int be_msix_register(struct be_adapter *adapter)
2169{
3abcdeda
SP
2170 struct be_rx_obj *rxo;
2171 int status, i;
2172 char qname[10];
b628bde2 2173
3abcdeda
SP
2174 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2175 adapter);
6b7c5b94
SP
2176 if (status)
2177 goto err;
2178
3abcdeda
SP
2179 for_all_rx_queues(adapter, rxo, i) {
2180 sprintf(qname, "rxq%d", i);
2181 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2182 qname, rxo);
2183 if (status)
2184 goto err_msix;
2185 }
b628bde2 2186
6b7c5b94 2187 return 0;
b628bde2 2188
3abcdeda
SP
2189err_msix:
2190 be_free_irq(adapter, &adapter->tx_eq, adapter);
2191
2192 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2193 be_free_irq(adapter, &rxo->rx_eq, rxo);
2194
6b7c5b94
SP
2195err:
2196 dev_warn(&adapter->pdev->dev,
2197 "MSIX Request IRQ failed - err %d\n", status);
ac6a0c4a 2198 be_msix_disable(adapter);
6b7c5b94
SP
2199 return status;
2200}
2201
2202static int be_irq_register(struct be_adapter *adapter)
2203{
2204 struct net_device *netdev = adapter->netdev;
2205 int status;
2206
ac6a0c4a 2207 if (msix_enabled(adapter)) {
6b7c5b94
SP
2208 status = be_msix_register(adapter);
2209 if (status == 0)
2210 goto done;
ba343c77
SB
2211 /* INTx is not supported for VF */
2212 if (!be_physfn(adapter))
2213 return status;
6b7c5b94
SP
2214 }
2215
2216 /* INTx */
2217 netdev->irq = adapter->pdev->irq;
2218 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2219 adapter);
2220 if (status) {
2221 dev_err(&adapter->pdev->dev,
2222 "INTx request IRQ failed - err %d\n", status);
2223 return status;
2224 }
2225done:
2226 adapter->isr_registered = true;
2227 return 0;
2228}
2229
2230static void be_irq_unregister(struct be_adapter *adapter)
2231{
2232 struct net_device *netdev = adapter->netdev;
3abcdeda
SP
2233 struct be_rx_obj *rxo;
2234 int i;
6b7c5b94
SP
2235
2236 if (!adapter->isr_registered)
2237 return;
2238
2239 /* INTx */
ac6a0c4a 2240 if (!msix_enabled(adapter)) {
6b7c5b94
SP
2241 free_irq(netdev->irq, adapter);
2242 goto done;
2243 }
2244
2245 /* MSIx */
3abcdeda
SP
2246 be_free_irq(adapter, &adapter->tx_eq, adapter);
2247
2248 for_all_rx_queues(adapter, rxo, i)
2249 be_free_irq(adapter, &rxo->rx_eq, rxo);
2250
6b7c5b94
SP
2251done:
2252 adapter->isr_registered = false;
6b7c5b94
SP
2253}
2254
482c9e79
SP
2255static void be_rx_queues_clear(struct be_adapter *adapter)
2256{
2257 struct be_queue_info *q;
2258 struct be_rx_obj *rxo;
2259 int i;
2260
2261 for_all_rx_queues(adapter, rxo, i) {
2262 q = &rxo->q;
2263 if (q->created) {
2264 be_cmd_rxq_destroy(adapter, q);
2265 /* After the rxq is invalidated, wait for a grace time
2266 			 * of 1ms for all DMA to end and the flush compl to
2267 * arrive
2268 */
2269 mdelay(1);
2270 be_rx_q_clean(adapter, rxo);
2271 }
2272
2273 /* Clear any residual events */
2274 q = &rxo->rx_eq.q;
2275 if (q->created)
2276 be_eq_clean(adapter, &rxo->rx_eq);
2277 }
2278}
2279
889cd4b2
SP
2280static int be_close(struct net_device *netdev)
2281{
2282 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda 2283 struct be_rx_obj *rxo;
3c8def97 2284 struct be_tx_obj *txo;
889cd4b2 2285 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2286 int vec, i;
889cd4b2 2287
889cd4b2
SP
2288 be_async_mcc_disable(adapter);
2289
fe6d2a38
SP
2290 if (!lancer_chip(adapter))
2291 be_intr_set(adapter, false);
889cd4b2 2292
63fcb27f
PR
2293 for_all_rx_queues(adapter, rxo, i)
2294 napi_disable(&rxo->rx_eq.napi);
2295
2296 napi_disable(&tx_eq->napi);
2297
2298 if (lancer_chip(adapter)) {
63fcb27f
PR
2299 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2300 for_all_rx_queues(adapter, rxo, i)
2301 be_cq_notify(adapter, rxo->cq.id, false, 0);
3c8def97
SP
2302 for_all_tx_queues(adapter, txo, i)
2303 be_cq_notify(adapter, txo->cq.id, false, 0);
63fcb27f
PR
2304 }
2305
ac6a0c4a 2306 if (msix_enabled(adapter)) {
fe6d2a38 2307 vec = be_msix_vec_get(adapter, tx_eq);
889cd4b2 2308 synchronize_irq(vec);
3abcdeda
SP
2309
2310 for_all_rx_queues(adapter, rxo, i) {
fe6d2a38 2311 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
3abcdeda
SP
2312 synchronize_irq(vec);
2313 }
889cd4b2
SP
2314 } else {
2315 synchronize_irq(netdev->irq);
2316 }
2317 be_irq_unregister(adapter);
2318
889cd4b2
SP
2319 /* Wait for all pending tx completions to arrive so that
2320 * all tx skbs are freed.
2321 */
3c8def97
SP
2322 for_all_tx_queues(adapter, txo, i)
2323 be_tx_compl_clean(adapter, txo);
889cd4b2 2324
482c9e79
SP
2325 be_rx_queues_clear(adapter);
2326 return 0;
2327}
2328
2329static int be_rx_queues_setup(struct be_adapter *adapter)
2330{
2331 struct be_rx_obj *rxo;
2332 int rc, i;
2333 u8 rsstable[MAX_RSS_QS];
2334
2335 for_all_rx_queues(adapter, rxo, i) {
2336 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2337 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2338 adapter->if_handle,
2339 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2340 if (rc)
2341 return rc;
2342 }
2343
2344 if (be_multi_rxq(adapter)) {
2345 for_all_rss_queues(adapter, rxo, i)
2346 rsstable[i] = rxo->rss_id;
2347
2348 rc = be_cmd_rss_config(adapter, rsstable,
2349 adapter->num_rx_qs - 1);
2350 if (rc)
2351 return rc;
2352 }
2353
2354 /* First time posting */
2355 for_all_rx_queues(adapter, rxo, i) {
2356 be_post_rx_frags(rxo, GFP_KERNEL);
2357 napi_enable(&rxo->rx_eq.napi);
2358 }
889cd4b2
SP
2359 return 0;
2360}
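/*
 * Queue 0 is created with rss_enable == 0 and serves as the default
 * (non-RSS) queue; the remaining num_rx_qs - 1 queues contribute their
 * rss_id values to rsstable, which be_cmd_rss_config() programs as the
 * RSS indirection targets.
 */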
2361
6b7c5b94
SP
2362static int be_open(struct net_device *netdev)
2363{
2364 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 2365 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2366 struct be_rx_obj *rxo;
3abcdeda 2367 int status, i;
5fb379ee 2368
482c9e79
SP
2369 status = be_rx_queues_setup(adapter);
2370 if (status)
2371 goto err;
2372
5fb379ee
SP
2373 napi_enable(&tx_eq->napi);
2374
2375 be_irq_register(adapter);
2376
fe6d2a38
SP
2377 if (!lancer_chip(adapter))
2378 be_intr_set(adapter, true);
5fb379ee
SP
2379
2380 /* The evt queues are created in unarmed state; arm them */
3abcdeda
SP
2381 for_all_rx_queues(adapter, rxo, i) {
2382 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2383 be_cq_notify(adapter, rxo->cq.id, true, 0);
2384 }
8788fdc2 2385 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
5fb379ee 2386
7a1e9b20
SP
2387 /* Now that interrupts are on we can process async mcc */
2388 be_async_mcc_enable(adapter);
2389
889cd4b2
SP
2390 return 0;
2391err:
2392 be_close(adapter->netdev);
2393 return -EIO;
5fb379ee
SP
2394}
2395
71d8d1b5
AK
2396static int be_setup_wol(struct be_adapter *adapter, bool enable)
2397{
2398 struct be_dma_mem cmd;
2399 int status = 0;
2400 u8 mac[ETH_ALEN];
2401
2402 memset(mac, 0, ETH_ALEN);
2403
2404 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2405 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2406 GFP_KERNEL);
71d8d1b5
AK
2407 if (cmd.va == NULL)
2408 return -1;
2409 memset(cmd.va, 0, cmd.size);
2410
2411 if (enable) {
2412 status = pci_write_config_dword(adapter->pdev,
2413 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2414 if (status) {
2415 dev_err(&adapter->pdev->dev,
2381a55c 2416 				"Could not enable Wake-on-LAN\n");
2b7bcebf
IV
2417 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2418 cmd.dma);
71d8d1b5
AK
2419 return status;
2420 }
2421 status = be_cmd_enable_magic_wol(adapter,
2422 adapter->netdev->dev_addr, &cmd);
2423 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2424 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2425 } else {
2426 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2427 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2428 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2429 }
2430
2b7bcebf 2431 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2432 return status;
2433}
2434
6d87f5c3
AK
2435/*
2436 * Generate a seed MAC address from the PF MAC Address using jhash.
2437 * MAC addresses for VFs are assigned incrementally starting from the seed.
2438 * These addresses are programmed in the ASIC by the PF and the VF driver
2439 * queries for the MAC address during its probe.
2440 */
2441static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2442{
f9449ab7 2443 u32 vf;
3abcdeda 2444 int status = 0;
6d87f5c3
AK
2445 u8 mac[ETH_ALEN];
2446
2447 be_vf_eth_addr_generate(adapter, mac);
2448
2449 for (vf = 0; vf < num_vfs; vf++) {
2450 status = be_cmd_pmac_add(adapter, mac,
2451 adapter->vf_cfg[vf].vf_if_handle,
f8617e08
AK
2452 &adapter->vf_cfg[vf].vf_pmac_id,
2453 vf + 1);
6d87f5c3
AK
2454 if (status)
2455 dev_err(&adapter->pdev->dev,
2456 "Mac address add failed for VF %d\n", vf);
2457 else
2458 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2459
2460 mac[5] += 1;
2461 }
2462 return status;
2463}
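/*
 * An illustrative sketch (not from this file) of what a jhash-based
 * seed generator such as be_vf_eth_addr_generate() might look like.
 * The fixed OUI bytes and the folding of the hash into the low three
 * bytes are assumptions for illustration only; <linux/jhash.h> provides
 * jhash().
 */
static void example_vf_mac_seed(struct be_adapter *adapter, u8 *mac)
{
	u32 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[0] = 0x00;			/* assumed vendor OUI */
	mac[1] = 0x00;
	mac[2] = 0xc9;
	mac[3] = (addr >> 16) & 0xff;	/* hash folded into the NIC bytes */
	mac[4] = (addr >> 8) & 0xff;
	mac[5] = addr & 0xff;		/* be_vf_eth_addr_config() bumps this per VF */
}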
2464
f9449ab7 2465static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3
AK
2466{
2467 u32 vf;
2468
2469 for (vf = 0; vf < num_vfs; vf++) {
2470 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2471 be_cmd_pmac_del(adapter,
2472 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 2473 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
6d87f5c3 2474 }
f9449ab7
SP
2475
2476 for (vf = 0; vf < num_vfs; vf++)
2477 if (adapter->vf_cfg[vf].vf_if_handle)
2478 be_cmd_if_destroy(adapter,
2479 adapter->vf_cfg[vf].vf_if_handle, vf + 1);
6d87f5c3
AK
2480}
2481
a54769f5
SP
2482static int be_clear(struct be_adapter *adapter)
2483{
a54769f5 2484 if (be_physfn(adapter) && adapter->sriov_enabled)
f9449ab7
SP
2485 be_vf_clear(adapter);
2486
2487 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2488
2489 be_mcc_queues_destroy(adapter);
2490 be_rx_queues_destroy(adapter);
2491 be_tx_queues_destroy(adapter);
2492 adapter->eq_next_idx = 0;
2493
a54769f5
SP
2494 adapter->be3_native = false;
2495 adapter->promiscuous = false;
2496
2497 /* tell fw we're done with firing cmds */
2498 be_cmd_fw_clean(adapter);
2499 return 0;
2500}
2501
f9449ab7
SP
2502static int be_vf_setup(struct be_adapter *adapter)
2503{
2504 u32 cap_flags, en_flags, vf;
2505 u16 lnk_speed;
2506 int status;
2507
2508 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2509 for (vf = 0; vf < num_vfs; vf++) {
2510 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2511 &adapter->vf_cfg[vf].vf_if_handle,
2512 NULL, vf+1);
2513 if (status)
2514 goto err;
2515 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2516 }
2517
2518 if (!lancer_chip(adapter)) {
2519 status = be_vf_eth_addr_config(adapter);
2520 if (status)
2521 goto err;
2522 }
2523
2524 for (vf = 0; vf < num_vfs; vf++) {
2525 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2526 vf + 1);
2527 if (status)
2528 goto err;
2529 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2530 }
2531 return 0;
2532err:
2533 return status;
2534}
2535
5fb379ee
SP
2536static int be_setup(struct be_adapter *adapter)
2537{
5fb379ee 2538 struct net_device *netdev = adapter->netdev;
f9449ab7 2539 u32 cap_flags, en_flags;
a54769f5 2540 u32 tx_fc, rx_fc;
6b7c5b94 2541 int status;
ba343c77
SB
2542 u8 mac[ETH_ALEN];
2543
f9449ab7
SP
2544 /* Allow all priorities by default. A GRP5 evt may modify this */
2545 adapter->vlan_prio_bmap = 0xff;
2546 adapter->link_speed = -1;
6b7c5b94 2547
f9449ab7 2548 be_cmd_req_native_mode(adapter);
73d540f2 2549
f9449ab7 2550 status = be_tx_queues_create(adapter);
6b7c5b94 2551 if (status != 0)
a54769f5 2552 goto err;
6b7c5b94 2553
f9449ab7 2554 status = be_rx_queues_create(adapter);
6b7c5b94 2555 if (status != 0)
a54769f5 2556 goto err;
6b7c5b94 2557
f9449ab7 2558 status = be_mcc_queues_create(adapter);
6b7c5b94 2559 if (status != 0)
a54769f5 2560 goto err;
6b7c5b94 2561
f9449ab7
SP
2562 memset(mac, 0, ETH_ALEN);
2563 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2564 true /*permanent */, 0);
2565 if (status)
2566 return status;
2567 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2568 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2903dd65 2569
f9449ab7
SP
2570 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2571 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2572 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2573 BE_IF_FLAGS_PROMISCUOUS;
2574 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2575 cap_flags |= BE_IF_FLAGS_RSS;
2576 en_flags |= BE_IF_FLAGS_RSS;
2577 }
2578 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2579 netdev->dev_addr, &adapter->if_handle,
2580 &adapter->pmac_id, 0);
5fb379ee 2581 if (status != 0)
a54769f5 2582 goto err;
6b7c5b94 2583
f9449ab7
SP
2584 /* For BEx, the VF's permanent mac queried from card is incorrect.
2585 	 * Query the MAC configured by the PF using if_handle
2586 */
2587 if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2588 status = be_cmd_mac_addr_query(adapter, mac,
2589 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2590 if (!status) {
2591 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2592 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2593 }
2594 }
0dffc83e 2595
04b71175 2596 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 2597
a54769f5
SP
2598 status = be_vid_config(adapter, false, 0);
2599 if (status)
2600 goto err;
7ab8b0b4 2601
a54769f5 2602 be_set_rx_mode(adapter->netdev);
5fb379ee 2603
a54769f5
SP
2604 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2605 if (status)
2606 goto err;
2607 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2608 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2609 adapter->rx_fc);
2610 if (status)
2611 goto err;
2612 }
2dc1deb6 2613
a54769f5 2614 pcie_set_readrq(adapter->pdev, 4096);
5fb379ee 2615
f9449ab7
SP
2616 if (be_physfn(adapter) && adapter->sriov_enabled) {
2617 status = be_vf_setup(adapter);
2618 if (status)
2619 goto err;
2620 }
2621
2622 return 0;
a54769f5
SP
2623err:
2624 be_clear(adapter);
2625 return status;
2626}
6b7c5b94 2627
84517482 2628#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2629static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2630 const u8 *p, u32 img_start, int image_size,
2631 int hdr_size)
fa9a6fed
SB
2632{
2633 u32 crc_offset;
2634 u8 flashed_crc[4];
2635 int status;
3f0d4560
AK
2636
2637 crc_offset = hdr_size + img_start + image_size - 4;
2638
fa9a6fed 2639 p += crc_offset;
3f0d4560
AK
2640
2641 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2642 (image_size - 4));
fa9a6fed
SB
2643 if (status) {
2644 dev_err(&adapter->pdev->dev,
2645 "could not get crc from flash, not flashing redboot\n");
2646 return false;
2647 }
2648
2649 	/* update redboot only if crc does not match */
2650 if (!memcmp(flashed_crc, p, 4))
2651 return false;
2652 else
2653 return true;
fa9a6fed
SB
2654}
2655
306f1348
SP
2656static bool phy_flashing_required(struct be_adapter *adapter)
2657{
2658 int status = 0;
2659 struct be_phy_info phy_info;
2660
2661 status = be_cmd_get_phy_info(adapter, &phy_info);
2662 if (status)
2663 return false;
2664 if ((phy_info.phy_type == TN_8022) &&
2665 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2666 return true;
2667 }
2668 return false;
2669}
2670
3f0d4560 2671static int be_flash_data(struct be_adapter *adapter,
84517482 2672 const struct firmware *fw,
3f0d4560
AK
2673 struct be_dma_mem *flash_cmd, int num_of_images)
2674
84517482 2675{
3f0d4560
AK
2676 int status = 0, i, filehdr_size = 0;
2677 u32 total_bytes = 0, flash_op;
84517482
AK
2678 int num_bytes;
2679 const u8 *p = fw->data;
2680 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2681 const struct flash_comp *pflashcomp;
9fe96934 2682 int num_comp;
3f0d4560 2683
306f1348 2684 static const struct flash_comp gen3_flash_types[10] = {
3f0d4560
AK
2685 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2686 FLASH_IMAGE_MAX_SIZE_g3},
2687 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2688 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2689 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2690 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2691 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2692 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2693 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2694 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2695 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2696 FLASH_IMAGE_MAX_SIZE_g3},
2697 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2698 FLASH_IMAGE_MAX_SIZE_g3},
2699 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2700 FLASH_IMAGE_MAX_SIZE_g3},
2701 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
306f1348
SP
2702 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2703 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2704 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
3f0d4560 2705 };
215faf9c 2706 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2707 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2708 FLASH_IMAGE_MAX_SIZE_g2},
2709 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2710 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2711 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2712 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2713 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2714 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2715 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2716 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2717 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2718 FLASH_IMAGE_MAX_SIZE_g2},
2719 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2720 FLASH_IMAGE_MAX_SIZE_g2},
2721 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2722 FLASH_IMAGE_MAX_SIZE_g2}
2723 };
2724
2725 if (adapter->generation == BE_GEN3) {
2726 pflashcomp = gen3_flash_types;
2727 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2728 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2729 } else {
2730 pflashcomp = gen2_flash_types;
2731 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2732 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2733 }
9fe96934
SB
2734 for (i = 0; i < num_comp; i++) {
2735 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2736 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2737 continue;
306f1348
SP
2738 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2739 if (!phy_flashing_required(adapter))
2740 continue;
2741 }
3f0d4560
AK
2742 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2743 (!be_flash_redboot(adapter, fw->data,
fae21a4d
AK
2744 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2745 (num_of_images * sizeof(struct image_hdr)))))
3f0d4560
AK
2746 continue;
2747 p = fw->data;
2748 p += filehdr_size + pflashcomp[i].offset
2749 + (num_of_images * sizeof(struct image_hdr));
306f1348
SP
2750 if (p + pflashcomp[i].size > fw->data + fw->size)
2751 return -1;
2752 total_bytes = pflashcomp[i].size;
3f0d4560
AK
2753 while (total_bytes) {
2754 if (total_bytes > 32*1024)
2755 num_bytes = 32*1024;
2756 else
2757 num_bytes = total_bytes;
2758 total_bytes -= num_bytes;
306f1348
SP
2759 if (!total_bytes) {
2760 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2761 flash_op = FLASHROM_OPER_PHY_FLASH;
2762 else
2763 flash_op = FLASHROM_OPER_FLASH;
2764 } else {
2765 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2766 flash_op = FLASHROM_OPER_PHY_SAVE;
2767 else
2768 flash_op = FLASHROM_OPER_SAVE;
2769 }
3f0d4560
AK
2770 memcpy(req->params.data_buf, p, num_bytes);
2771 p += num_bytes;
2772 status = be_cmd_write_flashrom(adapter, flash_cmd,
2773 pflashcomp[i].optype, flash_op, num_bytes);
2774 if (status) {
306f1348
SP
2775 if ((status == ILLEGAL_IOCTL_REQ) &&
2776 (pflashcomp[i].optype ==
2777 IMG_TYPE_PHY_FW))
2778 break;
3f0d4560
AK
2779 dev_err(&adapter->pdev->dev,
2780 "cmd to write to flash rom failed.\n");
2781 return -1;
2782 }
84517482 2783 }
84517482 2784 }
84517482
AK
2785 return 0;
2786}
2787
3f0d4560
AK
2788static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2789{
2790 if (fhdr == NULL)
2791 return 0;
2792 if (fhdr->build[0] == '3')
2793 return BE_GEN3;
2794 else if (fhdr->build[0] == '2')
2795 return BE_GEN2;
2796 else
2797 return 0;
2798}
2799
485bf569
SN
2800static int lancer_fw_download(struct be_adapter *adapter,
2801 const struct firmware *fw)
84517482 2802{
485bf569
SN
2803#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2804#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 2805 struct be_dma_mem flash_cmd;
485bf569
SN
2806 const u8 *data_ptr = NULL;
2807 u8 *dest_image_ptr = NULL;
2808 size_t image_size = 0;
2809 u32 chunk_size = 0;
2810 u32 data_written = 0;
2811 u32 offset = 0;
2812 int status = 0;
2813 u8 add_status = 0;
84517482 2814
485bf569 2815 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 2816 dev_err(&adapter->pdev->dev,
485bf569
SN
2817 "FW Image not properly aligned. "
2818 "Length must be 4 byte aligned.\n");
2819 status = -EINVAL;
2820 goto lancer_fw_exit;
d9efd2af
SB
2821 }
2822
485bf569
SN
2823 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2824 + LANCER_FW_DOWNLOAD_CHUNK;
2825 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2826 &flash_cmd.dma, GFP_KERNEL);
2827 if (!flash_cmd.va) {
2828 status = -ENOMEM;
2829 dev_err(&adapter->pdev->dev,
2830 "Memory allocation failure while flashing\n");
2831 goto lancer_fw_exit;
2832 }
84517482 2833
485bf569
SN
2834 dest_image_ptr = flash_cmd.va +
2835 sizeof(struct lancer_cmd_req_write_object);
2836 image_size = fw->size;
2837 data_ptr = fw->data;
2838
2839 while (image_size) {
2840 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2841
2842 /* Copy the image chunk content. */
2843 memcpy(dest_image_ptr, data_ptr, chunk_size);
2844
2845 status = lancer_cmd_write_object(adapter, &flash_cmd,
2846 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2847 &data_written, &add_status);
2848
2849 if (status)
2850 break;
2851
2852 offset += data_written;
2853 data_ptr += data_written;
2854 image_size -= data_written;
2855 }
2856
2857 if (!status) {
2858 /* Commit the FW written */
2859 status = lancer_cmd_write_object(adapter, &flash_cmd,
2860 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2861 &data_written, &add_status);
2862 }
2863
2864 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2865 flash_cmd.dma);
2866 if (status) {
2867 dev_err(&adapter->pdev->dev,
2868 "Firmware load error. "
2869 "Status code: 0x%x Additional Status: 0x%x\n",
2870 status, add_status);
2871 goto lancer_fw_exit;
2872 }
2873
2874 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2875lancer_fw_exit:
2876 return status;
2877}
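/*
 * lancer_fw_download() streams the image through a single DMA buffer in
 * LANCER_FW_DOWNLOAD_CHUNK (32KB) pieces; the final zero-length
 * lancer_cmd_write_object() call at the end offset is what commits the
 * previously written object to flash.
 */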
2878
2879 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2880{
2881 struct flash_file_hdr_g2 *fhdr;
2882 struct flash_file_hdr_g3 *fhdr3;
2883 struct image_hdr *img_hdr_ptr = NULL;
2884 struct be_dma_mem flash_cmd;
2885 const u8 *p;
2886 int status = 0, i = 0, num_imgs = 0;
84517482
AK
2887
2888 p = fw->data;
3f0d4560 2889 fhdr = (struct flash_file_hdr_g2 *) p;
84517482 2890
84517482 2891 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2b7bcebf
IV
2892 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2893 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
2894 if (!flash_cmd.va) {
2895 status = -ENOMEM;
2896 dev_err(&adapter->pdev->dev,
2897 "Memory allocation failure while flashing\n");
485bf569 2898 goto be_fw_exit;
84517482
AK
2899 }
2900
3f0d4560
AK
2901 if ((adapter->generation == BE_GEN3) &&
2902 (get_ufigen_type(fhdr) == BE_GEN3)) {
2903 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
8b93b710
AK
2904 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2905 for (i = 0; i < num_imgs; i++) {
3f0d4560
AK
2906 img_hdr_ptr = (struct image_hdr *) (fw->data +
2907 (sizeof(struct flash_file_hdr_g3) +
8b93b710
AK
2908 i * sizeof(struct image_hdr)));
2909 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2910 status = be_flash_data(adapter, fw, &flash_cmd,
2911 num_imgs);
3f0d4560
AK
2912 }
2913 } else if ((adapter->generation == BE_GEN2) &&
2914 (get_ufigen_type(fhdr) == BE_GEN2)) {
2915 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2916 } else {
2917 dev_err(&adapter->pdev->dev,
2918 "UFI and Interface are not compatible for flashing\n");
2919 status = -1;
84517482
AK
2920 }
2921
2b7bcebf
IV
2922 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2923 flash_cmd.dma);
84517482
AK
2924 if (status) {
2925 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 2926 goto be_fw_exit;
84517482
AK
2927 }
2928
af901ca1 2929 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 2930
485bf569
SN
2931be_fw_exit:
2932 return status;
2933}
2934
2935int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2936{
2937 const struct firmware *fw;
2938 int status;
2939
2940 if (!netif_running(adapter->netdev)) {
2941 dev_err(&adapter->pdev->dev,
2942 "Firmware load not allowed (interface is down)\n");
2943 return -1;
2944 }
2945
2946 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2947 if (status)
2948 goto fw_exit;
2949
2950 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2951
2952 if (lancer_chip(adapter))
2953 status = lancer_fw_download(adapter, fw);
2954 else
2955 status = be_fw_download(adapter, fw);
2956
84517482
AK
2957fw_exit:
2958 release_firmware(fw);
2959 return status;
2960}
2961
6b7c5b94
SP
2962static struct net_device_ops be_netdev_ops = {
2963 .ndo_open = be_open,
2964 .ndo_stop = be_close,
2965 .ndo_start_xmit = be_xmit,
a54769f5 2966 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
2967 .ndo_set_mac_address = be_mac_addr_set,
2968 .ndo_change_mtu = be_change_mtu,
ab1594e9 2969 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 2970 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
2971 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2972 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 2973 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 2974 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 2975 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
64600ea5 2976 .ndo_get_vf_config = be_get_vf_config
6b7c5b94
SP
2977};
2978
2979static void be_netdev_init(struct net_device *netdev)
2980{
2981 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda
SP
2982 struct be_rx_obj *rxo;
2983 int i;
6b7c5b94 2984
6332c8d3 2985 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68
MM
2986 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2987 NETIF_F_HW_VLAN_TX;
2988 if (be_multi_rxq(adapter))
2989 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
2990
2991 netdev->features |= netdev->hw_features |
8b8ddc68 2992 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 2993
eb8a50d9 2994 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 2995 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 2996
6b7c5b94
SP
2997 netdev->flags |= IFF_MULTICAST;
2998
c190e3c8
AK
2999 netif_set_gso_max_size(netdev, 65535);
3000
6b7c5b94
SP
3001 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3002
3003 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3004
3abcdeda
SP
3005 for_all_rx_queues(adapter, rxo, i)
3006 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3007 BE_NAPI_WEIGHT);
3008
5fb379ee 3009 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
6b7c5b94 3010 BE_NAPI_WEIGHT);
6b7c5b94
SP
3011}
3012
3013static void be_unmap_pci_bars(struct be_adapter *adapter)
3014{
8788fdc2
SP
3015 if (adapter->csr)
3016 iounmap(adapter->csr);
3017 if (adapter->db)
3018 iounmap(adapter->db);
6b7c5b94
SP
3019}
3020
3021static int be_map_pci_bars(struct be_adapter *adapter)
3022{
3023 u8 __iomem *addr;
db3ea781 3024 int db_reg;
6b7c5b94 3025
fe6d2a38
SP
3026 if (lancer_chip(adapter)) {
3027 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3028 pci_resource_len(adapter->pdev, 0));
3029 if (addr == NULL)
3030 return -ENOMEM;
3031 adapter->db = addr;
3032 return 0;
3033 }
3034
ba343c77
SB
3035 if (be_physfn(adapter)) {
3036 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3037 pci_resource_len(adapter->pdev, 2));
3038 if (addr == NULL)
3039 return -ENOMEM;
3040 adapter->csr = addr;
3041 }
6b7c5b94 3042
ba343c77 3043 if (adapter->generation == BE_GEN2) {
ba343c77
SB
3044 db_reg = 4;
3045 } else {
ba343c77
SB
3046 if (be_physfn(adapter))
3047 db_reg = 4;
3048 else
3049 db_reg = 0;
3050 }
3051 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3052 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
3053 if (addr == NULL)
3054 goto pci_map_err;
ba343c77
SB
3055 adapter->db = addr;
3056
6b7c5b94
SP
3057 return 0;
3058pci_map_err:
3059 be_unmap_pci_bars(adapter);
3060 return -ENOMEM;
3061}
3062
3063
3064static void be_ctrl_cleanup(struct be_adapter *adapter)
3065{
8788fdc2 3066 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3067
3068 be_unmap_pci_bars(adapter);
3069
3070 if (mem->va)
2b7bcebf
IV
3071 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3072 mem->dma);
e7b909a6 3073
5b8821b7 3074 mem = &adapter->rx_filter;
e7b909a6 3075 if (mem->va)
2b7bcebf
IV
3076 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3077 mem->dma);
6b7c5b94
SP
3078}
3079
6b7c5b94
SP
3080static int be_ctrl_init(struct be_adapter *adapter)
3081{
8788fdc2
SP
3082 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3083 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3084 struct be_dma_mem *rx_filter = &adapter->rx_filter;
6b7c5b94 3085 int status;
6b7c5b94
SP
3086
3087 status = be_map_pci_bars(adapter);
3088 if (status)
e7b909a6 3089 goto done;
6b7c5b94
SP
3090
3091 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
3092 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3093 mbox_mem_alloc->size,
3094 &mbox_mem_alloc->dma,
3095 GFP_KERNEL);
6b7c5b94 3096 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3097 status = -ENOMEM;
3098 goto unmap_pci_bars;
6b7c5b94
SP
3099 }
3100 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3101 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3102 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3103 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
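/*
 * The mailbox must be 16-byte aligned: the allocation above reserves
 * sizeof(struct be_mcc_mailbox) + 16 bytes so that PTR_ALIGN() can
 * round both va and dma up to the next 16-byte boundary while staying
 * inside the padded buffer (e.g. an allocation at ...0x1238 yields an
 * aligned mailbox at ...0x1240).
 */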
e7b909a6 3104
5b8821b7
SP
3105 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3106 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3107 &rx_filter->dma, GFP_KERNEL);
3108 if (rx_filter->va == NULL) {
e7b909a6
SP
3109 status = -ENOMEM;
3110 goto free_mbox;
3111 }
5b8821b7 3112 memset(rx_filter->va, 0, rx_filter->size);
e7b909a6 3113
2984961c 3114 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3115 spin_lock_init(&adapter->mcc_lock);
3116 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3117
dd131e76 3118 init_completion(&adapter->flash_compl);
cf588477 3119 pci_save_state(adapter->pdev);
6b7c5b94 3120 return 0;
e7b909a6
SP
3121
3122free_mbox:
2b7bcebf
IV
3123 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3124 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3125
3126unmap_pci_bars:
3127 be_unmap_pci_bars(adapter);
3128
3129done:
3130 return status;
6b7c5b94
SP
3131}
3132
3133static void be_stats_cleanup(struct be_adapter *adapter)
3134{
3abcdeda 3135 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3136
3137 if (cmd->va)
2b7bcebf
IV
3138 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3139 cmd->va, cmd->dma);
6b7c5b94
SP
3140}
3141
3142static int be_stats_init(struct be_adapter *adapter)
3143{
3abcdeda 3144 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3145
005d5696 3146 if (adapter->generation == BE_GEN2) {
89a88ab8 3147 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
005d5696
SX
3148 } else {
3149 if (lancer_chip(adapter))
3150 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3151 else
3152 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3153 }
2b7bcebf
IV
3154 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3155 GFP_KERNEL);
6b7c5b94
SP
3156 if (cmd->va == NULL)
3157 return -1;
d291b9af 3158 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3159 return 0;
3160}
3161
3162static void __devexit be_remove(struct pci_dev *pdev)
3163{
3164 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3165
6b7c5b94
SP
3166 if (!adapter)
3167 return;
3168
f203af70
SK
3169 cancel_delayed_work_sync(&adapter->work);
3170
6b7c5b94
SP
3171 unregister_netdev(adapter->netdev);
3172
5fb379ee
SP
3173 be_clear(adapter);
3174
6b7c5b94
SP
3175 be_stats_cleanup(adapter);
3176
3177 be_ctrl_cleanup(adapter);
3178
ba343c77
SB
3179 be_sriov_disable(adapter);
3180
8d56ff11 3181 be_msix_disable(adapter);
6b7c5b94
SP
3182
3183 pci_set_drvdata(pdev, NULL);
3184 pci_release_regions(pdev);
3185 pci_disable_device(pdev);
3186
3187 free_netdev(adapter->netdev);
3188}
3189
2243e2e9 3190static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3191{
6b7c5b94
SP
3192 int status;
3193
3abcdeda
SP
3194 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3195 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3196 if (status)
3197 return status;
3198
752961a1 3199 if (adapter->function_mode & FLEX10_MODE)
82903e4b
AK
3200 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3201 else
3202 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3203
9e1453c5
AK
3204 status = be_cmd_get_cntl_attributes(adapter);
3205 if (status)
3206 return status;
3207
2243e2e9 3208 return 0;
6b7c5b94
SP
3209}
3210
fe6d2a38
SP
3211static int be_dev_family_check(struct be_adapter *adapter)
3212{
3213 struct pci_dev *pdev = adapter->pdev;
3214 u32 sli_intf = 0, if_type;
3215
3216 switch (pdev->device) {
3217 case BE_DEVICE_ID1:
3218 case OC_DEVICE_ID1:
3219 adapter->generation = BE_GEN2;
3220 break;
3221 case BE_DEVICE_ID2:
3222 case OC_DEVICE_ID2:
3223 adapter->generation = BE_GEN3;
3224 break;
3225 case OC_DEVICE_ID3:
12f4d0a8 3226 case OC_DEVICE_ID4:
fe6d2a38
SP
3227 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3228 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3229 SLI_INTF_IF_TYPE_SHIFT;
3230
3231 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3232 if_type != 0x02) {
3233 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3234 return -EINVAL;
3235 }
fe6d2a38
SP
3236 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3237 SLI_INTF_FAMILY_SHIFT);
3238 adapter->generation = BE_GEN3;
3239 break;
3240 default:
3241 adapter->generation = 0;
3242 }
3243 return 0;
3244}
3245
37eed1cb
PR
3246static int lancer_wait_ready(struct be_adapter *adapter)
3247{
3248#define SLIPORT_READY_TIMEOUT 500
3249 u32 sliport_status;
3250 int status = 0, i;
3251
3252 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3253 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3254 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3255 break;
3256
3257 msleep(20);
3258 }
3259
3260 if (i == SLIPORT_READY_TIMEOUT)
3261 status = -1;
3262
3263 return status;
3264}
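/*
 * The poll above checks SLIPORT_STATUS every 20ms for up to
 * SLIPORT_READY_TIMEOUT (500) iterations, i.e. roughly a 10 second
 * ready timeout before giving up with -1.
 */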
3265
3266static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3267{
3268 int status;
3269 u32 sliport_status, err, reset_needed;
3270 status = lancer_wait_ready(adapter);
3271 if (!status) {
3272 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3273 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3274 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3275 if (err && reset_needed) {
3276 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3277 adapter->db + SLIPORT_CONTROL_OFFSET);
3278
3279 /* check adapter has corrected the error */
3280 status = lancer_wait_ready(adapter);
3281 sliport_status = ioread32(adapter->db +
3282 SLIPORT_STATUS_OFFSET);
3283 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3284 SLIPORT_STATUS_RN_MASK);
3285 if (status || sliport_status)
3286 status = -1;
3287 } else if (err || reset_needed) {
3288 status = -1;
3289 }
3290 }
3291 return status;
3292}
3293
6b7c5b94
SP
3294static int __devinit be_probe(struct pci_dev *pdev,
3295 const struct pci_device_id *pdev_id)
3296{
3297 int status = 0;
3298 struct be_adapter *adapter;
3299 struct net_device *netdev;
6b7c5b94
SP
3300
3301 status = pci_enable_device(pdev);
3302 if (status)
3303 goto do_none;
3304
3305 status = pci_request_regions(pdev, DRV_NAME);
3306 if (status)
3307 goto disable_dev;
3308 pci_set_master(pdev);
3309
3c8def97 3310 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
6b7c5b94
SP
3311 if (netdev == NULL) {
3312 status = -ENOMEM;
3313 goto rel_reg;
3314 }
3315 adapter = netdev_priv(netdev);
3316 adapter->pdev = pdev;
3317 pci_set_drvdata(pdev, adapter);
fe6d2a38
SP
3318
3319 status = be_dev_family_check(adapter);
63657b9c 3320 if (status)
fe6d2a38
SP
3321 goto free_netdev;
3322
6b7c5b94 3323 adapter->netdev = netdev;
2243e2e9 3324 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3325
2b7bcebf 3326 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
3327 if (!status) {
3328 netdev->features |= NETIF_F_HIGHDMA;
3329 } else {
2b7bcebf 3330 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
3331 if (status) {
3332 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3333 goto free_netdev;
3334 }
3335 }
3336
f9449ab7
SP
3337 status = be_sriov_enable(adapter);
3338 if (status)
3339 goto free_netdev;
ba343c77 3340
6b7c5b94
SP
3341 status = be_ctrl_init(adapter);
3342 if (status)
f9449ab7 3343 goto disable_sriov;
6b7c5b94 3344
37eed1cb
PR
3345 if (lancer_chip(adapter)) {
3346 status = lancer_test_and_set_rdy_state(adapter);
3347 if (status) {
3348 			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
48f5a191 3349 goto ctrl_clean;
37eed1cb
PR
3350 }
3351 }
3352
2243e2e9 3353 /* sync up with fw's ready state */
ba343c77
SB
3354 if (be_physfn(adapter)) {
3355 status = be_cmd_POST(adapter);
3356 if (status)
3357 goto ctrl_clean;
ba343c77 3358 }
6b7c5b94 3359
2243e2e9
SP
3360 /* tell fw we're ready to fire cmds */
3361 status = be_cmd_fw_init(adapter);
6b7c5b94 3362 if (status)
2243e2e9
SP
3363 goto ctrl_clean;
3364
a4b4dfab
AK
3365 status = be_cmd_reset_function(adapter);
3366 if (status)
3367 goto ctrl_clean;
556ae191 3368
2243e2e9
SP
3369 status = be_stats_init(adapter);
3370 if (status)
3371 goto ctrl_clean;
3372
3373 status = be_get_config(adapter);
6b7c5b94
SP
3374 if (status)
3375 goto stats_clean;
6b7c5b94 3376
b9ab82c7
SP
3377 /* The INTR bit may be set in the card when probed by a kdump kernel
3378 * after a crash.
3379 */
3380 if (!lancer_chip(adapter))
3381 be_intr_set(adapter, false);
3382
3abcdeda
SP
3383 be_msix_enable(adapter);
3384
6b7c5b94 3385 INIT_DELAYED_WORK(&adapter->work, be_worker);
a54769f5 3386 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 3387
5fb379ee
SP
3388 status = be_setup(adapter);
3389 if (status)
3abcdeda 3390 goto msix_disable;
2243e2e9 3391
3abcdeda 3392 be_netdev_init(netdev);
6b7c5b94
SP
3393 status = register_netdev(netdev);
3394 if (status != 0)
5fb379ee 3395 goto unsetup;
6b7c5b94 3396
c4ca2374 3397 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
34b1ef04 3398
f203af70 3399 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3400 return 0;
3401
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

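/* Legacy PCI power-management hooks: quiesce the NIC and tear down its
 * resources on suspend, then rebuild them from scratch on resume.
 */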
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

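/* PCI error-recovery (EEH) callbacks: detach and tear down when an error
 * is detected, re-POST the card on slot reset, and rebuild the queues and
 * interface once the platform reports the slot usable again.
 */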
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

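/* Module entry points: validate module parameters once at load time,
 * then hand the device ID table over to the PCI core.
 */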
static int __init be_init_module(void)
{
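	/* The RX rings are carved into fixed-size fragments; only 2048,
	 * 4096 and 8192 byte fragments are supported, so reject anything
	 * else and fall back to the 2048 default.
	 */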
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);