/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
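
/*
 * Usage sketch (hypothetical values; the module built from this driver
 * is typically be2net):
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * Both parameters are S_IRUGO, i.e. readable but not writable through
 * /sys/module/be2net/parameters once the module is loaded.
 */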

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
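
/*
 * Textual descriptions for the UE (unrecoverable error) status low/high
 * CSRs; each of the 32 entries in an array below appears to describe
 * the correspondingly numbered bit of its register.
 */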
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

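/*
 * Doorbell helpers: each composes a 32-bit doorbell word (ring id in
 * the low bits, plus a count of newly posted buffers or processed
 * entries and, for EQs/CQs, re-arm bits) and writes it to the
 * adapter's doorbell BAR.  The wmb() in the RQ/TXQ paths ensures the
 * descriptor writes reach memory before the doorbell makes them
 * visible to hardware.
 */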
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

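/*
 * The firmware reports port/rx stats in three layouts: v0 (BE2), v1
 * (BE3) and the Lancer pport format.  The populate_*() helpers below
 * copy whichever layout the adapter returned into the single
 * hw-independent be_drv_stats structure that the rest of the driver
 * consumes.
 */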
static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v0 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);
        struct be_rxf_stats_v0 *rxf_stats =
                be_rxf_stats_from_cmd(adapter);

        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors =
                port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
        struct be_rxf_stats_v1 *rxf_stats = be_rxf_stats_from_cmd(adapter);
        struct be_port_rxf_stats_v1 *port_stats =
                be_port_rxf_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_cmd_pport_stats *pport_stats =
                pport_stats_from_cmd(adapter);

        drvs->rx_priority_pause_frames = 0;
        drvs->pmem_fifo_overflow_drop = 0;
        drvs->rx_pause_frames =
                make_64bit_val(pport_stats->rx_pause_frames_hi,
                               pport_stats->rx_pause_frames_lo);
        drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
                                             pport_stats->rx_crc_errors_lo);
        drvs->rx_control_frames =
                make_64bit_val(pport_stats->rx_control_frames_hi,
                               pport_stats->rx_control_frames_lo);
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        /* pair the hi word with the lo word of the same counter;
         * rx_frames_too_long_hi is assumed to exist alongside the _lo field
         */
        drvs->rx_frame_too_long =
                make_64bit_val(pport_stats->rx_frames_too_long_hi,
                               pport_stats->rx_frames_too_long_lo);
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                make_64bit_val(pport_stats->rx_symbol_errors_hi,
                               pport_stats->rx_symbol_errors_lo);
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
                                              pport_stats->tx_pause_frames_lo);
        drvs->tx_controlframes =
                make_64bit_val(pport_stats->tx_control_frames_hi,
                               pport_stats->tx_control_frames_lo);
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_no_pbuf = 0;
        drvs->rx_drops_no_txpb = 0;
        drvs->rx_drops_no_erx_descr = 0;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
                                                 pport_stats->num_forwards_lo);
        drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
                                            pport_stats->rx_drops_mtu_lo);
        drvs->rx_drops_no_tpre_descr = 0;
        drvs->rx_drops_too_many_frags =
                make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
                               pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                if (adapter->generation == BE_GEN3) {
                        if (!(lancer_chip(adapter))) {
                                struct be_erx_stats_v1 *erx_stats =
                                        be_erx_stats_from_cmd(adapter);
                                dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                        }
                } else {
                        struct be_erx_stats_v0 *erx_stats =
                                be_erx_stats_from_cmd(adapter);
                        dev_stats->rx_dropped +=
                                erx_stats->rx_drops_no_fragments[rxo->q.id];
                }
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt +
                drvs->rx_tcp_checksum_errs +
                drvs->rx_ip_checksum_errs +
                drvs->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        dev_stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

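/*
 * Worked example for be_calc_rate() below: 250,000,000 bytes moved in
 * 2 * HZ ticks -> 125,000,000 bytes/sec; "<< 3" scales that to
 * 1,000,000,000 bits/sec, and the final divide by 1,000,000 yields a
 * reported rate of 1000 Mbits/sec.
 */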
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

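/*
 * make_tx_wrbs() writes the WRB chain for one skb into the TX ring: a
 * header WRB first, then one fragment WRB per DMA-mapped piece (the
 * linear head, then each paged frag), plus the optional dummy WRB
 * computed by wrb_cnt_for_skb().  It returns the number of bytes
 * queued, or 0 after unwinding all mappings if any dma_map_*() call
 * failed.
 */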
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

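/*
 * Hot-path transmit: count the WRBs the skb needs, build them, record
 * the skb for completion-time freeing, and ring the TX doorbell.  The
 * queue is stopped *before* the doorbell whenever a worst-case skb
 * might no longer fit, so the completion path can safely wake it.
 */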
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, true);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, false);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

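/*
 * SR-IOV ndo hooks: these run in the PF on behalf of a VF and push the
 * VF's MAC, VLAN and TX-rate settings down via mailbox commands.  The
 * "vf + 1" passed as the domain argument reflects the convention
 * (assumed here) that domain 0 is the PF and VFs start at 1.
 */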
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                adapter->vf_cfg[vf].vf_if_handle,
                                adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                        adapter->vf_cfg[vf].vf_if_handle,
                        &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                        mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                        "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                        "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
                                        rxcp->vlan_tag);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
                                rxcp->vlan_tag);
}

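/*
 * The BE3-native ("v1") and legacy ("v0") Rx completion descriptors
 * carry the same fields at different bit offsets.  The two parsers
 * below use AMAP_GET_BITS to lift every field out of the raw
 * completion into the hw-independent be_rx_compl_info, so nothing
 * past this point needs to know the raw descriptor layout.
 */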
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
                                               vlan_tag, compl);
        }
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
                                               vlan_tag, compl);
        }
}

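/*
 * Completion reaping convention (be_tx_compl_get() follows the same
 * scheme): an entry is consumed only once its valid bit is seen set;
 * the rmb() keeps the CPU from reading the rest of the DMA'd entry
 * before that check, and zeroing the valid dword afterwards re-arms
 * the slot for the next pass around the ring.
 */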
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (((adapter->pvid & VLAN_VID_MASK) ==
                     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

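/*
 * Buffer-ownership note for the Rx path below: every rx_frag_size
 * chunk carved out of a "big page" holds its own page reference, but
 * the DMA mapping spans the whole page, so only the chunk flagged
 * last_page_user triggers the dma_unmap_page() when completions are
 * processed (see get_rx_page_info()).
 */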
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        kfree_skb(sent_skb);
        return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

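/*
 * TX drain at teardown: poll up to ~200ms for outstanding completions,
 * then forcibly unmap and free any skbs whose completions never
 * arrived, so that no DMA mappings or skbs are leaked.
 */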
static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        num_wrbs += be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        atomic_sub(num_wrbs, &txq->used);
                        cmpl = 0;
                        num_wrbs = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                num_wrbs = be_tx_compl_process(adapter, end_idx);
                atomic_sub(num_wrbs, &txq->used);
        }
}

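/*
 * Queue setup/teardown below follows one pattern: allocate host memory
 * for the ring, then issue the firmware command that creates it; on
 * failure the goto ladder unwinds exactly the steps that succeeded, in
 * reverse order.
 */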
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

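/* TX objects are brought up bottom-to-top: the event queue first, then the
 * completion queue bound to that EQ, then the work-request queue bound to
 * the CQ; destruction (above) runs in the opposite order.
 */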
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

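/* RX queue 0 is the default (non-RSS) queue; queues 1..N are created with
 * RSS enabled and their rss_ids are later programmed into the RSS
 * indirection table via be_cmd_rss_config().
 */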
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

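/* NAPI RX poll: consumes up to 'budget' completions, feeding error-free TCP
 * frames through GRO and everything else through the regular receive path.
 * Zero-length "flush" completions (posted while an RX queue is being torn
 * down) carry no data and are simply discarded.
 */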
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

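/* Unlike be_poll_rx() above, this handler drains its queues completely on
 * every invocation and unconditionally calls napi_complete(), returning 1
 * regardless of the budget it was given.
 */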
/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx, num_wrbs = 0;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		atomic_sub(num_wrbs, &txq->used);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

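/* Reads the two Unrecoverable Error (UE) status CSRs from PCI config space,
 * masks off the bits the firmware has declared ignorable, and logs a
 * description of each remaining bit using the ue_status_low_desc /
 * ue_status_hi_desc tables defined at the top of this file.
 */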
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

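/* Periodic housekeeping, rescheduled once a second: checks for unrecoverable
 * errors, kicks off an asynchronous stats query, updates TX/RX rate counters
 * and adaptive interrupt coalescing, and replenishes any RX ring that ran
 * dry. While the interface is down only pending MCC completions are reaped.
 */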
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}
	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

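/* With the legacy pci_enable_msix() interface used here, a positive return
 * value is the number of vectors the platform could actually provide; the
 * code retries with that smaller count as long as it still covers the
 * driver's minimum of one RX plus one TX/MCC vector.
 */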
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

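/* SR-IOV is controlled by the num_vfs module parameter (e.g. loading the
 * module with num_vfs=4); the request is clamped to the TotalVFs value the
 * device reports in its SR-IOV capability before VFs are actually enabled.
 */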
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

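/* Bring-up mirrors be_close(): RX rings are stocked and NAPI enabled before
 * IRQs are registered and interrupts enabled, and only then are the event
 * and completion queues armed, so no notification can arrive before the
 * driver is ready to service it.
 */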
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

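/* One-time resource setup: creates the controller-side interface (and one
 * per VF when SR-IOV is active), then the TX, RX, and MCC queue sets, in
 * that order; MCC queues must come last since they share the TX EQ.
 */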
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

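/* Firmware flashing. A UFI image is a file header followed by per-component
 * image headers and payloads; each component is streamed to the controller
 * in 32KB chunks with FLASHROM_OPER_SAVE and committed on the final chunk
 * with FLASHROM_OPER_FLASH (see be_flash_data() below). The boot-loader
 * (redboot) component is written only when its CRC differs from what is
 * already in flash.
 */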
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

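/* BAR usage varies with the controller family: Lancer exposes only a
 * doorbell region in BAR 0; gen2 puts pcicfg in BAR 1 and doorbells in
 * BAR 4; gen3 puts pcicfg in BAR 0, with doorbells in BAR 4 for the PF and
 * BAR 0 for VFs. The CSR space in BAR 2 is mapped only for physical
 * functions, and VFs locate their pcicfg window at a fixed offset within
 * the doorbell BAR.
 */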
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer */
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}

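/* Maps the PCI device ID to a controller generation: BE2-class IDs are
 * gen2, BE3-class IDs are gen3, and the Lancer IDs are identified by
 * validating the SLI_INTF register before the family is read out of it.
 */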
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

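/* Polls the SLIPORT status register until the firmware reports ready,
 * giving up after 500 iterations of msleep(20), i.e. roughly 10 seconds.
 */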
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

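/* Probe sequence: enable and map the PCI device, identify the controller
 * family, set the DMA mask (64-bit with a 32-bit fallback), enable SR-IOV
 * if requested, bring up the mailbox/control path, wait for firmware POST,
 * reset the function, then allocate stats memory, query the configuration,
 * enable MSI-X, create the queues via be_setup(), and finally register the
 * net device and start the worker.
 */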
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	/* By default all priorities are enabled.
	 * Needed in case of no GRP5 evt support
	 */
	adapter->vlan_prio_bmap = 0xff;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

3429static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3430{
3431 struct be_adapter *adapter = pci_get_drvdata(pdev);
3432 struct net_device *netdev = adapter->netdev;
3433
a4ca055f 3434 cancel_delayed_work_sync(&adapter->work);
71d8d1b5
AK
3435 if (adapter->wol)
3436 be_setup_wol(adapter, true);
3437
6b7c5b94
SP
3438 netif_device_detach(netdev);
3439 if (netif_running(netdev)) {
3440 rtnl_lock();
3441 be_close(netdev);
3442 rtnl_unlock();
3443 }
9e90c961 3444 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
9b0365f1 3445 be_clear(adapter);
6b7c5b94 3446
a4ca055f 3447 be_msix_disable(adapter);
6b7c5b94
SP
3448 pci_save_state(pdev);
3449 pci_disable_device(pdev);
3450 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3451 return 0;
3452}
3453
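/*
 * Resume path: mirror of be_suspend(). The device must be re-enabled
 * and its config space restored before any mailbox traffic, and the
 * FW init command must complete before be_setup() recreates the
 * queues. Note that be_setup()'s return value is not checked here.
 */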
3454static int be_resume(struct pci_dev *pdev)
3455{
3456 int status = 0;
3457 struct be_adapter *adapter = pci_get_drvdata(pdev);
3458 struct net_device *netdev = adapter->netdev;
3459
3460 netif_device_detach(netdev);
3461
3462 status = pci_enable_device(pdev);
3463 if (status)
3464 return status;
3465
3466 pci_set_power_state(pdev, PCI_D0);
3467 pci_restore_state(pdev);
3468
a4ca055f 3469 be_msix_enable(adapter);
2243e2e9
SP
3470 /* tell fw we're ready to fire cmds */
3471 status = be_cmd_fw_init(adapter);
3472 if (status)
3473 return status;
3474
9b0365f1 3475 be_setup(adapter);
6b7c5b94
SP
3476 if (netif_running(netdev)) {
3477 rtnl_lock();
3478 be_open(netdev);
3479 rtnl_unlock();
3480 }
3481 netif_device_attach(netdev);
71d8d1b5
AK
3482
3483 if (adapter->wol)
3484 be_setup_wol(adapter, false);
a4ca055f
AK
3485
3486 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3487 return 0;
3488}
3489
82456b03
SP
3490/*
3491 * An FLR (function-level reset) will stop BE from DMAing any data;
3492 * the function is reset below so no DMA is in flight at shutdown. */
3493static void be_shutdown(struct pci_dev *pdev)
3494{
3495 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3496
2d5d4154
AK
3497 if (!adapter)
3498 return;
82456b03 3499
0f4a6828 3500 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3501
2d5d4154 3502 netif_device_detach(adapter->netdev);
82456b03 3503
82456b03
SP
3504 if (adapter->wol)
3505 be_setup_wol(adapter, true);
3506
57841869
AK
3507 be_cmd_reset_function(adapter);
3508
82456b03 3509 pci_disable_device(pdev);
82456b03
SP
3510}
3511
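/*
 * EEH (PCI error recovery) callbacks. The PCI core invokes them in
 * order: error_detected() quiesces the driver, slot_reset()
 * re-initializes the device after the slot has been reset, and
 * resume() rebuilds the queues and restarts traffic.
 */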
cf588477
SP
3512static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3513 pci_channel_state_t state)
3514{
3515 struct be_adapter *adapter = pci_get_drvdata(pdev);
3516 struct net_device *netdev = adapter->netdev;
3517
3518 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3519
3520 adapter->eeh_err = true;
3521
3522 netif_device_detach(netdev);
3523
3524 if (netif_running(netdev)) {
3525 rtnl_lock();
3526 be_close(netdev);
3527 rtnl_unlock();
3528 }
3529 be_clear(adapter);
3530
3531 if (state == pci_channel_io_perm_failure)
3532 return PCI_ERS_RESULT_DISCONNECT;
3533
3534 pci_disable_device(pdev);
3535
3536 return PCI_ERS_RESULT_NEED_RESET;
3537}
3538
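/*
 * Called after the slot has been reset: bring the device back up and
 * poll POST to confirm the card and firmware are ready before
 * reporting recovery to the PCI core.
 */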
3539static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3540{
3541 struct be_adapter *adapter = pci_get_drvdata(pdev);
3542 int status;
3543
3544 dev_info(&adapter->pdev->dev, "EEH reset\n");
3545 adapter->eeh_err = false;
3546
3547 status = pci_enable_device(pdev);
3548 if (status)
3549 return PCI_ERS_RESULT_DISCONNECT;
3550
3551 pci_set_master(pdev);
3552 pci_set_power_state(pdev, PCI_D0);
3553 pci_restore_state(pdev);
3554
3555 /* Check if card is ok and fw is ready */
3556 status = be_cmd_POST(adapter);
3557 if (status)
3558 return PCI_ERS_RESULT_DISCONNECT;
3559
3560 return PCI_ERS_RESULT_RECOVERED;
3561}
3562
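/*
 * Final EEH stage: re-init the firmware mailbox, recreate the queues
 * via be_setup() and reattach the netdev so traffic can resume.
 */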
3563static void be_eeh_resume(struct pci_dev *pdev)
3564{
3565 int status = 0;
3566 struct be_adapter *adapter = pci_get_drvdata(pdev);
3567 struct net_device *netdev = adapter->netdev;
3568
3569 dev_info(&adapter->pdev->dev, "EEH resume\n");
3570
3571 pci_save_state(pdev);
3572
3573 /* tell fw we're ready to fire cmds */
3574 status = be_cmd_fw_init(adapter);
3575 if (status)
3576 goto err;
3577
3578 status = be_setup(adapter);
3579 if (status)
3580 goto err;
3581
3582 if (netif_running(netdev)) {
3583 status = be_open(netdev);
3584 if (status)
3585 goto err;
3586 }
3587 netif_device_attach(netdev);
3588 return;
3589err:
3590 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
3591}
3592
3593static struct pci_error_handlers be_eeh_handlers = {
3594 .error_detected = be_eeh_err_detected,
3595 .slot_reset = be_eeh_reset,
3596 .resume = be_eeh_resume,
3597};
3598
6b7c5b94
SP
3599static struct pci_driver be_driver = {
3600 .name = DRV_NAME,
3601 .id_table = be_dev_ids,
3602 .probe = be_probe,
3603 .remove = be_remove,
3604 .suspend = be_suspend,
cf588477 3605 .resume = be_resume,
82456b03 3606 .shutdown = be_shutdown,
cf588477 3607 .err_handler = &be_eeh_handlers
6b7c5b94
SP
3608};
3609
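/*
 * rx_frag_size is restricted to 2048, 4096 or 8192 bytes; any other
 * module-parameter value is coerced to the 2048-byte default below.
 */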
3610static int __init be_init_module(void)
3611{
8e95a202
JP
3612 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3613 rx_frag_size != 2048) {
6b7c5b94
SP
3614 printk(KERN_WARNING DRV_NAME
3615 " : Module param rx_frag_size must be 2048/4096/8192."
3616 " Using 2048\n");
3617 rx_frag_size = 2048;
3618 }
6b7c5b94
SP
3619
3620 return pci_register_driver(&be_driver);
3621}
3622module_init(be_init_module);
3623
3624static void __exit be_exit_module(void)
3625{
3626 pci_unregister_driver(&be_driver);
3627}
3628module_exit(be_exit_module);