be2net: fix access to SEMAPHORE reg
[deliverable/linux.git] drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
26 MODULE_DEVICE_TABLE(pci, be_dev_ids);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("ServerEngines Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
48 { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
51 /* UE Status Low CSR */
52 static const char * const ue_status_low_desc[] = {
53 "CEV",
54 "CTX",
55 "DBUF",
56 "ERX",
57 "Host",
58 "MPU",
59 "NDMA",
60 "PTC ",
61 "RDMA ",
62 "RXF ",
63 "RXIPS ",
64 "RXULP0 ",
65 "RXULP1 ",
66 "RXULP2 ",
67 "TIM ",
68 "TPOST ",
69 "TPRE ",
70 "TXIPS ",
71 "TXULP0 ",
72 "TXULP1 ",
73 "UC ",
74 "WDMA ",
75 "TXULP2 ",
76 "HOST1 ",
77 "P0_OB_LINK ",
78 "P1_OB_LINK ",
79 "HOST_GPIO ",
80 "MBOX ",
81 "AXGMAC0",
82 "AXGMAC1",
83 "JTAG",
84 "MPU_INTPEND"
85 };
86 /* UE Status High CSR */
87 static const char * const ue_status_hi_desc[] = {
88 "LPCMEMHOST",
89 "MGMT_MAC",
90 "PCS0ONLINE",
91 "MPU_IRAM",
92 "PCS1ONLINE",
93 "PCTL0",
94 "PCTL1",
95 "PMEM",
96 "RR",
97 "TXPB",
98 "RXPP",
99 "XAUI",
100 "TXP",
101 "ARM",
102 "IPC",
103 "HOST2",
104 "HOST3",
105 "HOST4",
106 "HOST5",
107 "HOST6",
108 "HOST7",
109 "HOST8",
110 "HOST9",
111 "NETC",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown"
120 };
121
122 /* Is BE in a multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131 struct be_dma_mem *mem = &q->dma_mem;
132 if (mem->va) {
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
135 mem->va = NULL;
136 }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141 {
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
150 if (!mem->va)
151 return -ENOMEM;
152 memset(mem->va, 0, mem->size);
153 return 0;
154 }
155
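/* Enable or disable host interrupt generation by toggling the HOSTINTR
 * bit in the membar interrupt-control register; skipped while an EEH
 * (PCI) error is being handled.
 */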
156 static void be_intr_set(struct be_adapter *adapter, bool enable)
157 {
158 u32 reg, enabled;
159
160 if (adapter->eeh_error)
161 return;
162
163 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
164 &reg);
165 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
166
167 if (!enabled && enable)
168 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169 else if (enabled && !enable)
170 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
171 else
172 return;
173
174 pci_write_config_dword(adapter->pdev,
175 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
176 }
177
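/* Doorbell helpers: each write packs a ring id and a count into one
 * 32-bit value to tell the HW how many entries were posted to (or
 * popped from) the RQ, TXQ, EQ or CQ.
 */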
178 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179 {
180 u32 val = 0;
181 val |= qid & DB_RQ_RING_ID_MASK;
182 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
183
184 wmb();
185 iowrite32(val, adapter->db + DB_RQ_OFFSET);
186 }
187
188 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
189 {
190 u32 val = 0;
191 val |= qid & DB_TXULP_RING_ID_MASK;
192 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
193
194 wmb();
195 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
196 }
197
198 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
199 bool arm, bool clear_int, u16 num_popped)
200 {
201 u32 val = 0;
202 val |= qid & DB_EQ_RING_ID_MASK;
203 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
204 DB_EQ_RING_ID_EXT_MASK_SHIFT);
205
206 if (adapter->eeh_error)
207 return;
208
209 if (arm)
210 val |= 1 << DB_EQ_REARM_SHIFT;
211 if (clear_int)
212 val |= 1 << DB_EQ_CLR_SHIFT;
213 val |= 1 << DB_EQ_EVNT_SHIFT;
214 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
215 iowrite32(val, adapter->db + DB_EQ_OFFSET);
216 }
217
218 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
219 {
220 u32 val = 0;
221 val |= qid & DB_CQ_RING_ID_MASK;
222 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
223 DB_CQ_RING_ID_EXT_MASK_SHIFT);
224
225 if (adapter->eeh_error)
226 return;
227
228 if (arm)
229 val |= 1 << DB_CQ_REARM_SHIFT;
230 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
231 iowrite32(val, adapter->db + DB_CQ_OFFSET);
232 }
233
234 static int be_mac_addr_set(struct net_device *netdev, void *p)
235 {
236 struct be_adapter *adapter = netdev_priv(netdev);
237 struct sockaddr *addr = p;
238 int status = 0;
239 u8 current_mac[ETH_ALEN];
240 u32 pmac_id = adapter->pmac_id[0];
241 bool active_mac = true;
242
243 if (!is_valid_ether_addr(addr->sa_data))
244 return -EADDRNOTAVAIL;
245
246 /* For BE VF, MAC address is already activated by PF.
247 * Hence the only operation left is updating netdev->dev_addr.
248 * Update it only if the user passes the same MAC that was assigned
249 * to the VF by the PF (hypervisor).
250 */
251 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
252 status = be_cmd_mac_addr_query(adapter, current_mac,
253 false, adapter->if_handle, 0);
254 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
255 goto done;
256 else
257 goto err;
258 }
259
260 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
261 goto done;
262
263 /* For Lancer check if any MAC is active.
264 * If active, get its mac id.
265 */
266 if (lancer_chip(adapter) && !be_physfn(adapter))
267 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
268 &pmac_id, 0);
269
270 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
271 adapter->if_handle,
272 &adapter->pmac_id[0], 0);
273
274 if (status)
275 goto err;
276
277 if (active_mac)
278 be_cmd_pmac_del(adapter, adapter->if_handle,
279 pmac_id, 0);
280 done:
281 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
282 return 0;
283 err:
284 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
285 return status;
286 }
287
288 /* BE2 supports only v0 cmd */
289 static void *hw_stats_from_cmd(struct be_adapter *adapter)
290 {
291 if (BE2_chip(adapter)) {
292 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
293
294 return &cmd->hw_stats;
295 } else {
296 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
297
298 return &cmd->hw_stats;
299 }
300 }
301
302 /* BE2 supports only v0 cmd */
303 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
304 {
305 if (BE2_chip(adapter)) {
306 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
307
308 return &hw_stats->erx;
309 } else {
310 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311
312 return &hw_stats->erx;
313 }
314 }
315
316 static void populate_be_v0_stats(struct be_adapter *adapter)
317 {
318 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
319 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
320 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
321 struct be_port_rxf_stats_v0 *port_stats =
322 &rxf_stats->port[adapter->port_num];
323 struct be_drv_stats *drvs = &adapter->drv_stats;
324
325 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
326 drvs->rx_pause_frames = port_stats->rx_pause_frames;
327 drvs->rx_crc_errors = port_stats->rx_crc_errors;
328 drvs->rx_control_frames = port_stats->rx_control_frames;
329 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
330 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
331 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
332 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
333 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
334 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
335 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
336 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
337 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
338 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
339 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
340 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
341 drvs->rx_dropped_header_too_small =
342 port_stats->rx_dropped_header_too_small;
343 drvs->rx_address_mismatch_drops =
344 port_stats->rx_address_mismatch_drops +
345 port_stats->rx_vlan_mismatch_drops;
346 drvs->rx_alignment_symbol_errors =
347 port_stats->rx_alignment_symbol_errors;
348
349 drvs->tx_pauseframes = port_stats->tx_pauseframes;
350 drvs->tx_controlframes = port_stats->tx_controlframes;
351
352 if (adapter->port_num)
353 drvs->jabber_events = rxf_stats->port1_jabber_events;
354 else
355 drvs->jabber_events = rxf_stats->port0_jabber_events;
356 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
357 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
358 drvs->forwarded_packets = rxf_stats->forwarded_packets;
359 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
360 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
361 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
362 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
363 }
364
365 static void populate_be_v1_stats(struct be_adapter *adapter)
366 {
367 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
368 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
369 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
370 struct be_port_rxf_stats_v1 *port_stats =
371 &rxf_stats->port[adapter->port_num];
372 struct be_drv_stats *drvs = &adapter->drv_stats;
373
374 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
375 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
376 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
377 drvs->rx_pause_frames = port_stats->rx_pause_frames;
378 drvs->rx_crc_errors = port_stats->rx_crc_errors;
379 drvs->rx_control_frames = port_stats->rx_control_frames;
380 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
381 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
382 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
383 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
384 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
385 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
386 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
387 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
388 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
389 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
390 drvs->rx_dropped_header_too_small =
391 port_stats->rx_dropped_header_too_small;
392 drvs->rx_input_fifo_overflow_drop =
393 port_stats->rx_input_fifo_overflow_drop;
394 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
395 drvs->rx_alignment_symbol_errors =
396 port_stats->rx_alignment_symbol_errors;
397 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
398 drvs->tx_pauseframes = port_stats->tx_pauseframes;
399 drvs->tx_controlframes = port_stats->tx_controlframes;
400 drvs->jabber_events = port_stats->jabber_events;
401 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
402 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
403 drvs->forwarded_packets = rxf_stats->forwarded_packets;
404 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
405 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
406 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
407 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
408 }
409
410 static void populate_lancer_stats(struct be_adapter *adapter)
411 {
412
413 struct be_drv_stats *drvs = &adapter->drv_stats;
414 struct lancer_pport_stats *pport_stats =
415 pport_stats_from_cmd(adapter);
416
417 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
418 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
419 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
420 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
421 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
422 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
423 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
424 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
425 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
426 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
427 drvs->rx_dropped_tcp_length =
428 pport_stats->rx_dropped_invalid_tcp_length;
429 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
430 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
431 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
432 drvs->rx_dropped_header_too_small =
433 pport_stats->rx_dropped_header_too_small;
434 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
435 drvs->rx_address_mismatch_drops =
436 pport_stats->rx_address_mismatch_drops +
437 pport_stats->rx_vlan_mismatch_drops;
438 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
439 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
440 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
441 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
442 drvs->jabber_events = pport_stats->rx_jabbers;
443 drvs->forwarded_packets = pport_stats->num_forwards_lo;
444 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
445 drvs->rx_drops_too_many_frags =
446 pport_stats->rx_drops_too_many_frags_lo;
447 }
448
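/* Accumulate a 16-bit HW counter (which wraps at 65535) into a 32-bit SW
 * counter: the low 16 bits mirror the HW value, the high bits count the
 * detected wrap-arounds.
 */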
449 static void accumulate_16bit_val(u32 *acc, u16 val)
450 {
451 #define lo(x) (x & 0xFFFF)
452 #define hi(x) (x & 0xFFFF0000)
453 bool wrapped = val < lo(*acc);
454 u32 newacc = hi(*acc) + val;
455
456 if (wrapped)
457 newacc += 65536;
458 ACCESS_ONCE(*acc) = newacc;
459 }
460
461 void be_parse_stats(struct be_adapter *adapter)
462 {
463 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
464 struct be_rx_obj *rxo;
465 int i;
466
467 if (lancer_chip(adapter)) {
468 populate_lancer_stats(adapter);
469 } else {
470 if (BE2_chip(adapter))
471 populate_be_v0_stats(adapter);
472 else
473 /* for BE3 and Skyhawk */
474 populate_be_v1_stats(adapter);
475
476 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
477 for_all_rx_queues(adapter, rxo, i) {
478 /* below erx HW counter can actually wrap around after
479 * 65535. Driver accumulates a 32-bit value
480 */
481 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
482 (u16)erx->rx_drops_no_fragments
483 [rxo->q.id]);
484 }
485 }
486 }
487
488 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
489 struct rtnl_link_stats64 *stats)
490 {
491 struct be_adapter *adapter = netdev_priv(netdev);
492 struct be_drv_stats *drvs = &adapter->drv_stats;
493 struct be_rx_obj *rxo;
494 struct be_tx_obj *txo;
495 u64 pkts, bytes;
496 unsigned int start;
497 int i;
498
499 for_all_rx_queues(adapter, rxo, i) {
500 const struct be_rx_stats *rx_stats = rx_stats(rxo);
501 do {
502 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
503 pkts = rx_stats(rxo)->rx_pkts;
504 bytes = rx_stats(rxo)->rx_bytes;
505 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
506 stats->rx_packets += pkts;
507 stats->rx_bytes += bytes;
508 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
509 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
510 rx_stats(rxo)->rx_drops_no_frags;
511 }
512
513 for_all_tx_queues(adapter, txo, i) {
514 const struct be_tx_stats *tx_stats = tx_stats(txo);
515 do {
516 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
517 pkts = tx_stats(txo)->tx_pkts;
518 bytes = tx_stats(txo)->tx_bytes;
519 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
520 stats->tx_packets += pkts;
521 stats->tx_bytes += bytes;
522 }
523
524 /* bad pkts received */
525 stats->rx_errors = drvs->rx_crc_errors +
526 drvs->rx_alignment_symbol_errors +
527 drvs->rx_in_range_errors +
528 drvs->rx_out_range_errors +
529 drvs->rx_frame_too_long +
530 drvs->rx_dropped_too_small +
531 drvs->rx_dropped_too_short +
532 drvs->rx_dropped_header_too_small +
533 drvs->rx_dropped_tcp_length +
534 drvs->rx_dropped_runt;
535
536 /* detailed rx errors */
537 stats->rx_length_errors = drvs->rx_in_range_errors +
538 drvs->rx_out_range_errors +
539 drvs->rx_frame_too_long;
540
541 stats->rx_crc_errors = drvs->rx_crc_errors;
542
543 /* frame alignment errors */
544 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
545
546 /* receiver fifo overrun */
547 /* drops_no_pbuf is not per i/f, it's per BE card */
548 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
549 drvs->rx_input_fifo_overflow_drop +
550 drvs->rx_drops_no_pbuf;
551 return stats;
552 }
553
554 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
555 {
556 struct net_device *netdev = adapter->netdev;
557
558 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
559 netif_carrier_off(netdev);
560 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
561 }
562
563 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
564 netif_carrier_on(netdev);
565 else
566 netif_carrier_off(netdev);
567 }
568
569 static void be_tx_stats_update(struct be_tx_obj *txo,
570 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
571 {
572 struct be_tx_stats *stats = tx_stats(txo);
573
574 u64_stats_update_begin(&stats->sync);
575 stats->tx_reqs++;
576 stats->tx_wrbs += wrb_cnt;
577 stats->tx_bytes += copied;
578 stats->tx_pkts += (gso_segs ? gso_segs : 1);
579 if (stopped)
580 stats->tx_stops++;
581 u64_stats_update_end(&stats->sync);
582 }
583
584 /* Determine number of WRB entries needed to xmit data in an skb */
585 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
586 bool *dummy)
587 {
588 int cnt = (skb->len > skb->data_len);
589
590 cnt += skb_shinfo(skb)->nr_frags;
591
592 /* to account for hdr wrb */
593 cnt++;
594 if (lancer_chip(adapter) || !(cnt & 1)) {
595 *dummy = false;
596 } else {
597 /* add a dummy to make it an even num */
598 cnt++;
599 *dummy = true;
600 }
601 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
602 return cnt;
603 }
604
605 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
606 {
607 wrb->frag_pa_hi = upper_32_bits(addr);
608 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
609 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
610 wrb->rsvd0 = 0;
611 }
612
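/* Return the vlan tag to insert in a TX pkt; if the priority chosen by the
 * stack is not enabled in the adapter's priority bitmap, substitute the
 * recommended priority.
 */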
613 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
614 struct sk_buff *skb)
615 {
616 u8 vlan_prio;
617 u16 vlan_tag;
618
619 vlan_tag = vlan_tx_tag_get(skb);
620 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
621 /* If vlan priority provided by OS is NOT in available bmap */
622 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
623 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
624 adapter->recommended_prio;
625
626 return vlan_tag;
627 }
628
629 static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
630 {
631 return vlan_tx_tag_present(skb) || adapter->pvid;
632 }
633
634 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
635 struct sk_buff *skb, u32 wrb_cnt, u32 len)
636 {
637 u16 vlan_tag;
638
639 memset(hdr, 0, sizeof(*hdr));
640
641 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
642
643 if (skb_is_gso(skb)) {
644 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
645 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
646 hdr, skb_shinfo(skb)->gso_size);
647 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
648 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
649 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
650 if (is_tcp_pkt(skb))
651 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
652 else if (is_udp_pkt(skb))
653 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
654 }
655
656 if (vlan_tx_tag_present(skb)) {
657 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
658 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
659 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
660 }
661
662 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
663 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
666 }
667
668 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
669 bool unmap_single)
670 {
671 dma_addr_t dma;
672
673 be_dws_le_to_cpu(wrb, sizeof(*wrb));
674
675 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
676 if (wrb->frag_len) {
677 if (unmap_single)
678 dma_unmap_single(dev, dma, wrb->frag_len,
679 DMA_TO_DEVICE);
680 else
681 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
682 }
683 }
684
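/* DMA-map the skb header and frags and fill one WRB per fragment (plus an
 * optional dummy WRB). Returns the number of bytes posted, or 0 after
 * unwinding the mappings on a DMA mapping error.
 */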
685 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
686 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
687 {
688 dma_addr_t busaddr;
689 int i, copied = 0;
690 struct device *dev = &adapter->pdev->dev;
691 struct sk_buff *first_skb = skb;
692 struct be_eth_wrb *wrb;
693 struct be_eth_hdr_wrb *hdr;
694 bool map_single = false;
695 u16 map_head;
696
697 hdr = queue_head_node(txq);
698 queue_head_inc(txq);
699 map_head = txq->head;
700
701 if (skb->len > skb->data_len) {
702 int len = skb_headlen(skb);
703 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
704 if (dma_mapping_error(dev, busaddr))
705 goto dma_err;
706 map_single = true;
707 wrb = queue_head_node(txq);
708 wrb_fill(wrb, busaddr, len);
709 be_dws_cpu_to_le(wrb, sizeof(*wrb));
710 queue_head_inc(txq);
711 copied += len;
712 }
713
714 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
715 const struct skb_frag_struct *frag =
716 &skb_shinfo(skb)->frags[i];
717 busaddr = skb_frag_dma_map(dev, frag, 0,
718 skb_frag_size(frag), DMA_TO_DEVICE);
719 if (dma_mapping_error(dev, busaddr))
720 goto dma_err;
721 wrb = queue_head_node(txq);
722 wrb_fill(wrb, busaddr, skb_frag_size(frag));
723 be_dws_cpu_to_le(wrb, sizeof(*wrb));
724 queue_head_inc(txq);
725 copied += skb_frag_size(frag);
726 }
727
728 if (dummy_wrb) {
729 wrb = queue_head_node(txq);
730 wrb_fill(wrb, 0, 0);
731 be_dws_cpu_to_le(wrb, sizeof(*wrb));
732 queue_head_inc(txq);
733 }
734
735 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
736 be_dws_cpu_to_le(hdr, sizeof(*hdr));
737
738 return copied;
739 dma_err:
740 txq->head = map_head;
741 while (copied) {
742 wrb = queue_head_node(txq);
743 unmap_tx_frag(dev, wrb, map_single);
744 map_single = false;
745 copied -= wrb->frag_len;
746 queue_head_inc(txq);
747 }
748 return 0;
749 }
750
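/* Insert the vlan tag into the packet data itself (instead of relying on
 * HW vlan insertion); used to work around the HW csum bug noted in
 * be_xmit().
 */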
751 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
752 struct sk_buff *skb)
753 {
754 u16 vlan_tag = 0;
755
756 skb = skb_share_check(skb, GFP_ATOMIC);
757 if (unlikely(!skb))
758 return skb;
759
760 if (vlan_tx_tag_present(skb)) {
761 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
762 __vlan_put_tag(skb, vlan_tag);
763 skb->vlan_tci = 0;
764 }
765
766 return skb;
767 }
768
769 static netdev_tx_t be_xmit(struct sk_buff *skb,
770 struct net_device *netdev)
771 {
772 struct be_adapter *adapter = netdev_priv(netdev);
773 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
774 struct be_queue_info *txq = &txo->q;
775 struct iphdr *ip = NULL;
776 u32 wrb_cnt = 0, copied = 0;
777 u32 start = txq->head, eth_hdr_len;
778 bool dummy_wrb, stopped = false;
779
780 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
781 VLAN_ETH_HLEN : ETH_HLEN;
782
783 /* HW has a bug which considers padding bytes as legal
784 * and modifies the IPv4 hdr's 'tot_len' field
785 */
786 if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
787 is_ipv4_pkt(skb)) {
788 ip = (struct iphdr *)ip_hdr(skb);
789 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
790 }
791
792 /* HW has a bug wherein it will calculate CSUM for VLAN
793 * pkts even when csum offload is not requested for the pkt.
794 * Manually insert the VLAN tag in the pkt to work around this.
795 */
796 if (skb->ip_summed != CHECKSUM_PARTIAL &&
797 be_vlan_tag_chk(adapter, skb)) {
798 skb = be_insert_vlan_in_pkt(adapter, skb);
799 if (unlikely(!skb))
800 goto tx_drop;
801 }
802
803 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
804
805 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
806 if (copied) {
807 int gso_segs = skb_shinfo(skb)->gso_segs;
808
809 /* record the sent skb in the sent_skb table */
810 BUG_ON(txo->sent_skb_list[start]);
811 txo->sent_skb_list[start] = skb;
812
813 /* Ensure txq has space for the next skb; else stop the queue
814 * *BEFORE* ringing the tx doorbell, so that we serialize the
815 * tx compls of the current transmit which'll wake up the queue
816 */
817 atomic_add(wrb_cnt, &txq->used);
818 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
819 txq->len) {
820 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
821 stopped = true;
822 }
823
824 be_txq_notify(adapter, txq->id, wrb_cnt);
825
826 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
827 } else {
828 txq->head = start;
829 dev_kfree_skb_any(skb);
830 }
831 tx_drop:
832 return NETDEV_TX_OK;
833 }
834
835 static int be_change_mtu(struct net_device *netdev, int new_mtu)
836 {
837 struct be_adapter *adapter = netdev_priv(netdev);
838 if (new_mtu < BE_MIN_MTU ||
839 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
840 (ETH_HLEN + ETH_FCS_LEN))) {
841 dev_info(&adapter->pdev->dev,
842 "MTU must be between %d and %d bytes\n",
843 BE_MIN_MTU,
844 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
845 return -EINVAL;
846 }
847 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
848 netdev->mtu, new_mtu);
849 netdev->mtu = new_mtu;
850 return 0;
851 }
852
853 /*
854 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
855 * If the user configures more, place BE in vlan promiscuous mode.
856 */
857 static int be_vid_config(struct be_adapter *adapter)
858 {
859 u16 vids[BE_NUM_VLANS_SUPPORTED];
860 u16 num = 0, i;
861 int status = 0;
862
863 /* No need to further configure vids if in promiscuous mode */
864 if (adapter->promiscuous)
865 return 0;
866
867 if (adapter->vlans_added > adapter->max_vlans)
868 goto set_vlan_promisc;
869
870 /* Construct VLAN Table to give to HW */
871 for (i = 0; i < VLAN_N_VID; i++)
872 if (adapter->vlan_tag[i])
873 vids[num++] = cpu_to_le16(i);
874
875 status = be_cmd_vlan_config(adapter, adapter->if_handle,
876 vids, num, 1, 0);
877
878 /* Set to VLAN promisc mode as setting VLAN filter failed */
879 if (status) {
880 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
881 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
882 goto set_vlan_promisc;
883 }
884
885 return status;
886
887 set_vlan_promisc:
888 status = be_cmd_vlan_config(adapter, adapter->if_handle,
889 NULL, 0, 1, 1);
890 return status;
891 }
892
893 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
894 {
895 struct be_adapter *adapter = netdev_priv(netdev);
896 int status = 0;
897
898 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
899 status = -EINVAL;
900 goto ret;
901 }
902
903 /* Packets with VID 0 are always received by Lancer by default */
904 if (lancer_chip(adapter) && vid == 0)
905 goto ret;
906
907 adapter->vlan_tag[vid] = 1;
908 if (adapter->vlans_added <= (adapter->max_vlans + 1))
909 status = be_vid_config(adapter);
910
911 if (!status)
912 adapter->vlans_added++;
913 else
914 adapter->vlan_tag[vid] = 0;
915 ret:
916 return status;
917 }
918
919 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
920 {
921 struct be_adapter *adapter = netdev_priv(netdev);
922 int status = 0;
923
924 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
925 status = -EINVAL;
926 goto ret;
927 }
928
929 /* Packets with VID 0 are always received by Lancer by default */
930 if (lancer_chip(adapter) && vid == 0)
931 goto ret;
932
933 adapter->vlan_tag[vid] = 0;
934 if (adapter->vlans_added <= adapter->max_vlans)
935 status = be_vid_config(adapter);
936
937 if (!status)
938 adapter->vlans_added--;
939 else
940 adapter->vlan_tag[vid] = 1;
941 ret:
942 return status;
943 }
944
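/* Sync the HW RX filter with the netdev flags and UC/MC address lists;
 * falls back to promiscuous/allmulti mode when the HW filter limits are
 * exceeded.
 */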
945 static void be_set_rx_mode(struct net_device *netdev)
946 {
947 struct be_adapter *adapter = netdev_priv(netdev);
948 int status;
949
950 if (netdev->flags & IFF_PROMISC) {
951 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
952 adapter->promiscuous = true;
953 goto done;
954 }
955
956 /* BE was previously in promiscuous mode; disable it */
957 if (adapter->promiscuous) {
958 adapter->promiscuous = false;
959 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
960
961 if (adapter->vlans_added)
962 be_vid_config(adapter);
963 }
964
965 /* Enable multicast promisc if num configured exceeds what we support */
966 if (netdev->flags & IFF_ALLMULTI ||
967 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
968 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
969 goto done;
970 }
971
972 if (netdev_uc_count(netdev) != adapter->uc_macs) {
973 struct netdev_hw_addr *ha;
974 int i = 1; /* First slot is claimed by the Primary MAC */
975
976 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
977 be_cmd_pmac_del(adapter, adapter->if_handle,
978 adapter->pmac_id[i], 0);
979 }
980
981 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
982 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
983 adapter->promiscuous = true;
984 goto done;
985 }
986
987 netdev_for_each_uc_addr(ha, adapter->netdev) {
988 adapter->uc_macs++; /* First slot is for Primary MAC */
989 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
990 adapter->if_handle,
991 &adapter->pmac_id[adapter->uc_macs], 0);
992 }
993 }
994
995 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
996
997 /* Set to MCAST promisc mode if setting MULTICAST address fails */
998 if (status) {
999 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1000 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1001 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1002 }
1003 done:
1004 return;
1005 }
1006
1007 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1008 {
1009 struct be_adapter *adapter = netdev_priv(netdev);
1010 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1011 int status;
1012 bool active_mac = false;
1013 u32 pmac_id;
1014 u8 old_mac[ETH_ALEN];
1015
1016 if (!sriov_enabled(adapter))
1017 return -EPERM;
1018
1019 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1020 return -EINVAL;
1021
1022 if (lancer_chip(adapter)) {
1023 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1024 &pmac_id, vf + 1);
1025 if (!status && active_mac)
1026 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1027 pmac_id, vf + 1);
1028
1029 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1030 } else {
1031 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1032 vf_cfg->pmac_id, vf + 1);
1033
1034 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1035 &vf_cfg->pmac_id, vf + 1);
1036 }
1037
1038 if (status)
1039 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1040 mac, vf);
1041 else
1042 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1043
1044 return status;
1045 }
1046
1047 static int be_get_vf_config(struct net_device *netdev, int vf,
1048 struct ifla_vf_info *vi)
1049 {
1050 struct be_adapter *adapter = netdev_priv(netdev);
1051 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1052
1053 if (!sriov_enabled(adapter))
1054 return -EPERM;
1055
1056 if (vf >= adapter->num_vfs)
1057 return -EINVAL;
1058
1059 vi->vf = vf;
1060 vi->tx_rate = vf_cfg->tx_rate;
1061 vi->vlan = vf_cfg->vlan_tag;
1062 vi->qos = 0;
1063 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1064
1065 return 0;
1066 }
1067
1068 static int be_set_vf_vlan(struct net_device *netdev,
1069 int vf, u16 vlan, u8 qos)
1070 {
1071 struct be_adapter *adapter = netdev_priv(netdev);
1072 int status = 0;
1073
1074 if (!sriov_enabled(adapter))
1075 return -EPERM;
1076
1077 if (vf >= adapter->num_vfs || vlan > 4095)
1078 return -EINVAL;
1079
1080 if (vlan) {
1081 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1082 /* If this is a new value, program it; else skip. */
1083 adapter->vf_cfg[vf].vlan_tag = vlan;
1084
1085 status = be_cmd_set_hsw_config(adapter, vlan,
1086 vf + 1, adapter->vf_cfg[vf].if_handle);
1087 }
1088 } else {
1089 /* Reset Transparent Vlan Tagging. */
1090 adapter->vf_cfg[vf].vlan_tag = 0;
1091 vlan = adapter->vf_cfg[vf].def_vid;
1092 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1093 adapter->vf_cfg[vf].if_handle);
1094 }
1095
1096
1097 if (status)
1098 dev_info(&adapter->pdev->dev,
1099 "VLAN %d config on VF %d failed\n", vlan, vf);
1100 return status;
1101 }
1102
1103 static int be_set_vf_tx_rate(struct net_device *netdev,
1104 int vf, int rate)
1105 {
1106 struct be_adapter *adapter = netdev_priv(netdev);
1107 int status = 0;
1108
1109 if (!sriov_enabled(adapter))
1110 return -EPERM;
1111
1112 if (vf >= adapter->num_vfs)
1113 return -EINVAL;
1114
1115 if (rate < 100 || rate > 10000) {
1116 dev_err(&adapter->pdev->dev,
1117 "tx rate must be between 100 and 10000 Mbps\n");
1118 return -EINVAL;
1119 }
1120
1121 if (lancer_chip(adapter))
1122 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1123 else
1124 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1125
1126 if (status)
1127 dev_err(&adapter->pdev->dev,
1128 "tx rate %d on VF %d failed\n", rate, vf);
1129 else
1130 adapter->vf_cfg[vf].tx_rate = rate;
1131 return status;
1132 }
1133
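/* Count the VFs belonging to this PF by walking the PCI device list;
 * returns either the total VF count or only the VFs assigned to a guest,
 * depending on vf_state.
 */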
1134 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1135 {
1136 struct pci_dev *dev, *pdev = adapter->pdev;
1137 int vfs = 0, assigned_vfs = 0, pos;
1138 u16 offset, stride;
1139
1140 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1141 if (!pos)
1142 return 0;
1143 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1144 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1145
1146 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1147 while (dev) {
1148 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1149 vfs++;
1150 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1151 assigned_vfs++;
1152 }
1153 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1154 }
1155 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1156 }
1157
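/* Adaptive interrupt coalescing: recompute the EQ delay from the observed
 * RX packet rate (sampled at most once per second) and program it only if
 * it changed.
 */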
1158 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1159 {
1160 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1161 ulong now = jiffies;
1162 ulong delta = now - stats->rx_jiffies;
1163 u64 pkts;
1164 unsigned int start, eqd;
1165
1166 if (!eqo->enable_aic) {
1167 eqd = eqo->eqd;
1168 goto modify_eqd;
1169 }
1170
1171 if (eqo->idx >= adapter->num_rx_qs)
1172 return;
1173
1174 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1175
1176 /* Wrapped around */
1177 if (time_before(now, stats->rx_jiffies)) {
1178 stats->rx_jiffies = now;
1179 return;
1180 }
1181
1182 /* Update once a second */
1183 if (delta < HZ)
1184 return;
1185
1186 do {
1187 start = u64_stats_fetch_begin_bh(&stats->sync);
1188 pkts = stats->rx_pkts;
1189 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1190
1191 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1192 stats->rx_pkts_prev = pkts;
1193 stats->rx_jiffies = now;
1194 eqd = (stats->rx_pps / 110000) << 3;
1195 eqd = min(eqd, eqo->max_eqd);
1196 eqd = max(eqd, eqo->min_eqd);
1197 if (eqd < 10)
1198 eqd = 0;
1199
1200 modify_eqd:
1201 if (eqd != eqo->cur_eqd) {
1202 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1203 eqo->cur_eqd = eqd;
1204 }
1205 }
1206
1207 static void be_rx_stats_update(struct be_rx_obj *rxo,
1208 struct be_rx_compl_info *rxcp)
1209 {
1210 struct be_rx_stats *stats = rx_stats(rxo);
1211
1212 u64_stats_update_begin(&stats->sync);
1213 stats->rx_compl++;
1214 stats->rx_bytes += rxcp->pkt_size;
1215 stats->rx_pkts++;
1216 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1217 stats->rx_mcast_pkts++;
1218 if (rxcp->err)
1219 stats->rx_compl_err++;
1220 u64_stats_update_end(&stats->sync);
1221 }
1222
1223 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1224 {
1225 /* L4 checksum is not reliable for non TCP/UDP packets.
1226 * Also ignore ipcksm for ipv6 pkts */
1227 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1228 (rxcp->ip_csum || rxcp->ipv6);
1229 }
1230
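/* Return the page_info of the RX frag at frag_idx; the backing page is
 * DMA-unmapped when its last frag is consumed.
 */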
1231 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1232 u16 frag_idx)
1233 {
1234 struct be_adapter *adapter = rxo->adapter;
1235 struct be_rx_page_info *rx_page_info;
1236 struct be_queue_info *rxq = &rxo->q;
1237
1238 rx_page_info = &rxo->page_info_tbl[frag_idx];
1239 BUG_ON(!rx_page_info->page);
1240
1241 if (rx_page_info->last_page_user) {
1242 dma_unmap_page(&adapter->pdev->dev,
1243 dma_unmap_addr(rx_page_info, bus),
1244 adapter->big_page_size, DMA_FROM_DEVICE);
1245 rx_page_info->last_page_user = false;
1246 }
1247
1248 atomic_dec(&rxq->used);
1249 return rx_page_info;
1250 }
1251
1252 /* Throw away the data in the Rx completion */
1253 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1254 struct be_rx_compl_info *rxcp)
1255 {
1256 struct be_queue_info *rxq = &rxo->q;
1257 struct be_rx_page_info *page_info;
1258 u16 i, num_rcvd = rxcp->num_rcvd;
1259
1260 for (i = 0; i < num_rcvd; i++) {
1261 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1262 put_page(page_info->page);
1263 memset(page_info, 0, sizeof(*page_info));
1264 index_inc(&rxcp->rxq_idx, rxq->len);
1265 }
1266 }
1267
1268 /*
1269 * skb_fill_rx_data forms a complete skb for an ether frame
1270 * indicated by rxcp.
1271 */
1272 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1273 struct be_rx_compl_info *rxcp)
1274 {
1275 struct be_queue_info *rxq = &rxo->q;
1276 struct be_rx_page_info *page_info;
1277 u16 i, j;
1278 u16 hdr_len, curr_frag_len, remaining;
1279 u8 *start;
1280
1281 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1282 start = page_address(page_info->page) + page_info->page_offset;
1283 prefetch(start);
1284
1285 /* Copy data in the first descriptor of this completion */
1286 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1287
1288 skb->len = curr_frag_len;
1289 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1290 memcpy(skb->data, start, curr_frag_len);
1291 /* Complete packet has now been moved to data */
1292 put_page(page_info->page);
1293 skb->data_len = 0;
1294 skb->tail += curr_frag_len;
1295 } else {
1296 hdr_len = ETH_HLEN;
1297 memcpy(skb->data, start, hdr_len);
1298 skb_shinfo(skb)->nr_frags = 1;
1299 skb_frag_set_page(skb, 0, page_info->page);
1300 skb_shinfo(skb)->frags[0].page_offset =
1301 page_info->page_offset + hdr_len;
1302 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1303 skb->data_len = curr_frag_len - hdr_len;
1304 skb->truesize += rx_frag_size;
1305 skb->tail += hdr_len;
1306 }
1307 page_info->page = NULL;
1308
1309 if (rxcp->pkt_size <= rx_frag_size) {
1310 BUG_ON(rxcp->num_rcvd != 1);
1311 return;
1312 }
1313
1314 /* More frags present for this completion */
1315 index_inc(&rxcp->rxq_idx, rxq->len);
1316 remaining = rxcp->pkt_size - curr_frag_len;
1317 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1318 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1319 curr_frag_len = min(remaining, rx_frag_size);
1320
1321 /* Coalesce all frags from the same physical page in one slot */
1322 if (page_info->page_offset == 0) {
1323 /* Fresh page */
1324 j++;
1325 skb_frag_set_page(skb, j, page_info->page);
1326 skb_shinfo(skb)->frags[j].page_offset =
1327 page_info->page_offset;
1328 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1329 skb_shinfo(skb)->nr_frags++;
1330 } else {
1331 put_page(page_info->page);
1332 }
1333
1334 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1335 skb->len += curr_frag_len;
1336 skb->data_len += curr_frag_len;
1337 skb->truesize += rx_frag_size;
1338 remaining -= curr_frag_len;
1339 index_inc(&rxcp->rxq_idx, rxq->len);
1340 page_info->page = NULL;
1341 }
1342 BUG_ON(j > MAX_SKB_FRAGS);
1343 }
1344
1345 /* Process the RX completion indicated by rxcp when GRO is disabled */
1346 static void be_rx_compl_process(struct be_rx_obj *rxo,
1347 struct be_rx_compl_info *rxcp)
1348 {
1349 struct be_adapter *adapter = rxo->adapter;
1350 struct net_device *netdev = adapter->netdev;
1351 struct sk_buff *skb;
1352
1353 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1354 if (unlikely(!skb)) {
1355 rx_stats(rxo)->rx_drops_no_skbs++;
1356 be_rx_compl_discard(rxo, rxcp);
1357 return;
1358 }
1359
1360 skb_fill_rx_data(rxo, skb, rxcp);
1361
1362 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1363 skb->ip_summed = CHECKSUM_UNNECESSARY;
1364 else
1365 skb_checksum_none_assert(skb);
1366
1367 skb->protocol = eth_type_trans(skb, netdev);
1368 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1369 if (netdev->features & NETIF_F_RXHASH)
1370 skb->rxhash = rxcp->rss_hash;
1371
1372
1373 if (rxcp->vlanf)
1374 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1375
1376 netif_receive_skb(skb);
1377 }
1378
1379 /* Process the RX completion indicated by rxcp when GRO is enabled */
1380 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1381 struct be_rx_compl_info *rxcp)
1382 {
1383 struct be_adapter *adapter = rxo->adapter;
1384 struct be_rx_page_info *page_info;
1385 struct sk_buff *skb = NULL;
1386 struct be_queue_info *rxq = &rxo->q;
1387 u16 remaining, curr_frag_len;
1388 u16 i, j;
1389
1390 skb = napi_get_frags(napi);
1391 if (!skb) {
1392 be_rx_compl_discard(rxo, rxcp);
1393 return;
1394 }
1395
1396 remaining = rxcp->pkt_size;
1397 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1398 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1399
1400 curr_frag_len = min(remaining, rx_frag_size);
1401
1402 /* Coalesce all frags from the same physical page in one slot */
1403 if (i == 0 || page_info->page_offset == 0) {
1404 /* First frag or Fresh page */
1405 j++;
1406 skb_frag_set_page(skb, j, page_info->page);
1407 skb_shinfo(skb)->frags[j].page_offset =
1408 page_info->page_offset;
1409 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1410 } else {
1411 put_page(page_info->page);
1412 }
1413 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1414 skb->truesize += rx_frag_size;
1415 remaining -= curr_frag_len;
1416 index_inc(&rxcp->rxq_idx, rxq->len);
1417 memset(page_info, 0, sizeof(*page_info));
1418 }
1419 BUG_ON(j > MAX_SKB_FRAGS);
1420
1421 skb_shinfo(skb)->nr_frags = j + 1;
1422 skb->len = rxcp->pkt_size;
1423 skb->data_len = rxcp->pkt_size;
1424 skb->ip_summed = CHECKSUM_UNNECESSARY;
1425 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1426 if (adapter->netdev->features & NETIF_F_RXHASH)
1427 skb->rxhash = rxcp->rss_hash;
1428
1429 if (rxcp->vlanf)
1430 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1431
1432 napi_gro_frags(napi);
1433 }
1434
1435 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1436 struct be_rx_compl_info *rxcp)
1437 {
1438 rxcp->pkt_size =
1439 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1440 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1441 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1442 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1443 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1444 rxcp->ip_csum =
1445 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1446 rxcp->l4_csum =
1447 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1448 rxcp->ipv6 =
1449 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1450 rxcp->rxq_idx =
1451 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1452 rxcp->num_rcvd =
1453 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1454 rxcp->pkt_type =
1455 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1456 rxcp->rss_hash =
1457 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1458 if (rxcp->vlanf) {
1459 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1460 compl);
1461 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1462 compl);
1463 }
1464 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1465 }
1466
1467 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1468 struct be_rx_compl_info *rxcp)
1469 {
1470 rxcp->pkt_size =
1471 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1472 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1473 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1474 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1475 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1476 rxcp->ip_csum =
1477 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1478 rxcp->l4_csum =
1479 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1480 rxcp->ipv6 =
1481 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1482 rxcp->rxq_idx =
1483 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1484 rxcp->num_rcvd =
1485 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1486 rxcp->pkt_type =
1487 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1488 rxcp->rss_hash =
1489 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1490 if (rxcp->vlanf) {
1491 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1492 compl);
1493 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1494 compl);
1495 }
1496 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1497 }
1498
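/* Fetch the next valid RX completion from the CQ, convert it to CPU
 * endianness and parse it into rxo->rxcp; returns NULL if none is pending.
 */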
1499 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1500 {
1501 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1502 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1503 struct be_adapter *adapter = rxo->adapter;
1504
1505 /* For checking the valid bit it is Ok to use either definition as the
1506 * valid bit is at the same position in both v0 and v1 Rx compl */
1507 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1508 return NULL;
1509
1510 rmb();
1511 be_dws_le_to_cpu(compl, sizeof(*compl));
1512
1513 if (adapter->be3_native)
1514 be_parse_rx_compl_v1(compl, rxcp);
1515 else
1516 be_parse_rx_compl_v0(compl, rxcp);
1517
1518 if (rxcp->vlanf) {
1519 /* vlanf could be wrongly set in some cards.
1520 * ignore if vtm is not set */
1521 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1522 rxcp->vlanf = 0;
1523
1524 if (!lancer_chip(adapter))
1525 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1526
1527 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1528 !adapter->vlan_tag[rxcp->vlan_tag])
1529 rxcp->vlanf = 0;
1530 }
1531
1532 /* As the compl has been parsed, reset it; we won't touch it again */
1533 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1534
1535 queue_tail_inc(&rxo->cq);
1536 return rxcp;
1537 }
1538
1539 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1540 {
1541 u32 order = get_order(size);
1542
1543 if (order > 0)
1544 gfp |= __GFP_COMP;
1545 return alloc_pages(gfp, order);
1546 }
1547
1548 /*
1549 * Allocate a page, split it into fragments of size rx_frag_size and post as
1550 * receive buffers to BE
1551 */
1552 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1553 {
1554 struct be_adapter *adapter = rxo->adapter;
1555 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1556 struct be_queue_info *rxq = &rxo->q;
1557 struct page *pagep = NULL;
1558 struct be_eth_rx_d *rxd;
1559 u64 page_dmaaddr = 0, frag_dmaaddr;
1560 u32 posted, page_offset = 0;
1561
1562 page_info = &rxo->page_info_tbl[rxq->head];
1563 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1564 if (!pagep) {
1565 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1566 if (unlikely(!pagep)) {
1567 rx_stats(rxo)->rx_post_fail++;
1568 break;
1569 }
1570 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1571 0, adapter->big_page_size,
1572 DMA_FROM_DEVICE);
1573 page_info->page_offset = 0;
1574 } else {
1575 get_page(pagep);
1576 page_info->page_offset = page_offset + rx_frag_size;
1577 }
1578 page_offset = page_info->page_offset;
1579 page_info->page = pagep;
1580 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1581 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1582
1583 rxd = queue_head_node(rxq);
1584 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1585 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1586
1587 /* Any space left in the current big page for another frag? */
1588 if ((page_offset + rx_frag_size + rx_frag_size) >
1589 adapter->big_page_size) {
1590 pagep = NULL;
1591 page_info->last_page_user = true;
1592 }
1593
1594 prev_page_info = page_info;
1595 queue_head_inc(rxq);
1596 page_info = &rxo->page_info_tbl[rxq->head];
1597 }
1598 if (pagep)
1599 prev_page_info->last_page_user = true;
1600
1601 if (posted) {
1602 atomic_add(posted, &rxq->used);
1603 be_rxq_notify(adapter, rxq->id, posted);
1604 } else if (atomic_read(&rxq->used) == 0) {
1605 /* Let be_worker replenish when memory is available */
1606 rxo->rx_post_starved = true;
1607 }
1608 }
1609
1610 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1611 {
1612 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1613
1614 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1615 return NULL;
1616
1617 rmb();
1618 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1619
1620 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1621
1622 queue_tail_inc(tx_cq);
1623 return txcp;
1624 }
1625
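/* Unmap the WRBs of a completed TX request (up to last_index) and free the
 * corresponding skb; returns the number of WRBs processed, including the
 * header WRB.
 */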
1626 static u16 be_tx_compl_process(struct be_adapter *adapter,
1627 struct be_tx_obj *txo, u16 last_index)
1628 {
1629 struct be_queue_info *txq = &txo->q;
1630 struct be_eth_wrb *wrb;
1631 struct sk_buff **sent_skbs = txo->sent_skb_list;
1632 struct sk_buff *sent_skb;
1633 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1634 bool unmap_skb_hdr = true;
1635
1636 sent_skb = sent_skbs[txq->tail];
1637 BUG_ON(!sent_skb);
1638 sent_skbs[txq->tail] = NULL;
1639
1640 /* skip header wrb */
1641 queue_tail_inc(txq);
1642
1643 do {
1644 cur_index = txq->tail;
1645 wrb = queue_tail_node(txq);
1646 unmap_tx_frag(&adapter->pdev->dev, wrb,
1647 (unmap_skb_hdr && skb_headlen(sent_skb)));
1648 unmap_skb_hdr = false;
1649
1650 num_wrbs++;
1651 queue_tail_inc(txq);
1652 } while (cur_index != last_index);
1653
1654 kfree_skb(sent_skb);
1655 return num_wrbs;
1656 }
1657
1658 /* Return the number of events in the event queue */
1659 static inline int events_get(struct be_eq_obj *eqo)
1660 {
1661 struct be_eq_entry *eqe;
1662 int num = 0;
1663
1664 do {
1665 eqe = queue_tail_node(&eqo->q);
1666 if (eqe->evt == 0)
1667 break;
1668
1669 rmb();
1670 eqe->evt = 0;
1671 num++;
1672 queue_tail_inc(&eqo->q);
1673 } while (true);
1674
1675 return num;
1676 }
1677
1678 static int event_handle(struct be_eq_obj *eqo)
1679 {
1680 bool rearm = false;
1681 int num = events_get(eqo);
1682
1683 /* Deal with any spurious interrupts that come without events */
1684 if (!num)
1685 rearm = true;
1686
1687 if (num || msix_enabled(eqo->adapter))
1688 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1689
1690 if (num)
1691 napi_schedule(&eqo->napi);
1692
1693 return num;
1694 }
1695
1696 /* Leaves the EQ in a disarmed state */
1697 static void be_eq_clean(struct be_eq_obj *eqo)
1698 {
1699 int num = events_get(eqo);
1700
1701 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1702 }
1703
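/* Drain any pending RX completions and free RX buffers that were posted
 * but never used, leaving the RX queue empty.
 */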
1704 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1705 {
1706 struct be_rx_page_info *page_info;
1707 struct be_queue_info *rxq = &rxo->q;
1708 struct be_queue_info *rx_cq = &rxo->cq;
1709 struct be_rx_compl_info *rxcp;
1710 u16 tail;
1711
1712 /* First clean up pending rx completions */
1713 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1714 be_rx_compl_discard(rxo, rxcp);
1715 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1716 }
1717
1718 /* Then free posted rx buffers that were not used */
1719 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1720 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1721 page_info = get_rx_page_info(rxo, tail);
1722 put_page(page_info->page);
1723 memset(page_info, 0, sizeof(*page_info));
1724 }
1725 BUG_ON(atomic_read(&rxq->used));
1726 rxq->tail = rxq->head = 0;
1727 }
1728
1729 static void be_tx_compl_clean(struct be_adapter *adapter)
1730 {
1731 struct be_tx_obj *txo;
1732 struct be_queue_info *txq;
1733 struct be_eth_tx_compl *txcp;
1734 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1735 struct sk_buff *sent_skb;
1736 bool dummy_wrb;
1737 int i, pending_txqs;
1738
1739 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1740 do {
1741 pending_txqs = adapter->num_tx_qs;
1742
1743 for_all_tx_queues(adapter, txo, i) {
1744 txq = &txo->q;
1745 while ((txcp = be_tx_compl_get(&txo->cq))) {
1746 end_idx =
1747 AMAP_GET_BITS(struct amap_eth_tx_compl,
1748 wrb_index, txcp);
1749 num_wrbs += be_tx_compl_process(adapter, txo,
1750 end_idx);
1751 cmpl++;
1752 }
1753 if (cmpl) {
1754 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1755 atomic_sub(num_wrbs, &txq->used);
1756 cmpl = 0;
1757 num_wrbs = 0;
1758 }
1759 if (atomic_read(&txq->used) == 0)
1760 pending_txqs--;
1761 }
1762
1763 if (pending_txqs == 0 || ++timeo > 200)
1764 break;
1765
1766 mdelay(1);
1767 } while (true);
1768
1769 for_all_tx_queues(adapter, txo, i) {
1770 txq = &txo->q;
1771 if (atomic_read(&txq->used))
1772 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1773 atomic_read(&txq->used));
1774
1775 /* free posted tx for which compls will never arrive */
1776 while (atomic_read(&txq->used)) {
1777 sent_skb = txo->sent_skb_list[txq->tail];
1778 end_idx = txq->tail;
1779 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1780 &dummy_wrb);
1781 index_adv(&end_idx, num_wrbs - 1, txq->len);
1782 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1783 atomic_sub(num_wrbs, &txq->used);
1784 }
1785 }
1786 }
1787
1788 static void be_evt_queues_destroy(struct be_adapter *adapter)
1789 {
1790 struct be_eq_obj *eqo;
1791 int i;
1792
1793 for_all_evt_queues(adapter, eqo, i) {
1794 if (eqo->q.created) {
1795 be_eq_clean(eqo);
1796 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1797 }
1798 be_queue_free(adapter, &eqo->q);
1799 }
1800 }
1801
1802 static int be_evt_queues_create(struct be_adapter *adapter)
1803 {
1804 struct be_queue_info *eq;
1805 struct be_eq_obj *eqo;
1806 int i, rc;
1807
1808 adapter->num_evt_qs = num_irqs(adapter);
1809
1810 for_all_evt_queues(adapter, eqo, i) {
1811 eqo->adapter = adapter;
1812 eqo->tx_budget = BE_TX_BUDGET;
1813 eqo->idx = i;
1814 eqo->max_eqd = BE_MAX_EQD;
1815 eqo->enable_aic = true;
1816
1817 eq = &eqo->q;
1818 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1819 sizeof(struct be_eq_entry));
1820 if (rc)
1821 return rc;
1822
1823 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1824 if (rc)
1825 return rc;
1826 }
1827 return 0;
1828 }
1829
1830 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1831 {
1832 struct be_queue_info *q;
1833
1834 q = &adapter->mcc_obj.q;
1835 if (q->created)
1836 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1837 be_queue_free(adapter, q);
1838
1839 q = &adapter->mcc_obj.cq;
1840 if (q->created)
1841 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1842 be_queue_free(adapter, q);
1843 }
1844
1845 /* Must be called only after TX qs are created as MCC shares TX EQ */
1846 static int be_mcc_queues_create(struct be_adapter *adapter)
1847 {
1848 struct be_queue_info *q, *cq;
1849
1850 cq = &adapter->mcc_obj.cq;
1851 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1852 sizeof(struct be_mcc_compl)))
1853 goto err;
1854
1855 /* Use the default EQ for MCC completions */
1856 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1857 goto mcc_cq_free;
1858
1859 q = &adapter->mcc_obj.q;
1860 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1861 goto mcc_cq_destroy;
1862
1863 if (be_cmd_mccq_create(adapter, q, cq))
1864 goto mcc_q_free;
1865
1866 return 0;
1867
1868 mcc_q_free:
1869 be_queue_free(adapter, q);
1870 mcc_cq_destroy:
1871 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1872 mcc_cq_free:
1873 be_queue_free(adapter, cq);
1874 err:
1875 return -1;
1876 }
1877
1878 static void be_tx_queues_destroy(struct be_adapter *adapter)
1879 {
1880 struct be_queue_info *q;
1881 struct be_tx_obj *txo;
1882 u8 i;
1883
1884 for_all_tx_queues(adapter, txo, i) {
1885 q = &txo->q;
1886 if (q->created)
1887 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1888 be_queue_free(adapter, q);
1889
1890 q = &txo->cq;
1891 if (q->created)
1892 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1893 be_queue_free(adapter, q);
1894 }
1895 }
1896
1897 static int be_num_txqs_want(struct be_adapter *adapter)
1898 {
1899 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1900 be_is_mc(adapter) ||
1901 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1902 BE2_chip(adapter))
1903 return 1;
1904 else
1905 return adapter->max_tx_queues;
1906 }
1907
1908 static int be_tx_cqs_create(struct be_adapter *adapter)
1909 {
1910 struct be_queue_info *cq, *eq;
1911 int status;
1912 struct be_tx_obj *txo;
1913 u8 i;
1914
1915 adapter->num_tx_qs = be_num_txqs_want(adapter);
1916 if (adapter->num_tx_qs != MAX_TX_QS) {
1917 rtnl_lock();
1918 netif_set_real_num_tx_queues(adapter->netdev,
1919 adapter->num_tx_qs);
1920 rtnl_unlock();
1921 }
1922
1923 for_all_tx_queues(adapter, txo, i) {
1924 cq = &txo->cq;
1925 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1926 sizeof(struct be_eth_tx_compl));
1927 if (status)
1928 return status;
1929
1930 /* If num_evt_qs is less than num_tx_qs, then more than
1931 * one txq shares an eq
1932 */
1933 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1934 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1935 if (status)
1936 return status;
1937 }
1938 return 0;
1939 }
1940
1941 static int be_tx_qs_create(struct be_adapter *adapter)
1942 {
1943 struct be_tx_obj *txo;
1944 int i, status;
1945
1946 for_all_tx_queues(adapter, txo, i) {
1947 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1948 sizeof(struct be_eth_wrb));
1949 if (status)
1950 return status;
1951
1952 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1953 if (status)
1954 return status;
1955 }
1956
1957 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1958 adapter->num_tx_qs);
1959 return 0;
1960 }
1961
1962 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1963 {
1964 struct be_queue_info *q;
1965 struct be_rx_obj *rxo;
1966 int i;
1967
1968 for_all_rx_queues(adapter, rxo, i) {
1969 q = &rxo->cq;
1970 if (q->created)
1971 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1972 be_queue_free(adapter, q);
1973 }
1974 }
1975
1976 static int be_rx_cqs_create(struct be_adapter *adapter)
1977 {
1978 struct be_queue_info *eq, *cq;
1979 struct be_rx_obj *rxo;
1980 int rc, i;
1981
1982 /* We'll create as many RSS rings as there are irqs.
1983 * But when there's only one irq there's no use creating RSS rings
1984 */
1985 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1986 num_irqs(adapter) + 1 : 1;
1987 if (adapter->num_rx_qs != MAX_RX_QS) {
1988 rtnl_lock();
1989 netif_set_real_num_rx_queues(adapter->netdev,
1990 adapter->num_rx_qs);
1991 rtnl_unlock();
1992 }
1993
1994 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1995 for_all_rx_queues(adapter, rxo, i) {
1996 rxo->adapter = adapter;
1997 cq = &rxo->cq;
1998 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1999 sizeof(struct be_eth_rx_compl));
2000 if (rc)
2001 return rc;
2002
2003 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2004 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2005 if (rc)
2006 return rc;
2007 }
2008
2009 dev_info(&adapter->pdev->dev,
2010 "created %d RSS queue(s) and 1 default RX queue\n",
2011 adapter->num_rx_qs - 1);
2012 return 0;
2013 }
2014
2015 static irqreturn_t be_intx(int irq, void *dev)
2016 {
2017 struct be_adapter *adapter = dev;
2018 int num_evts;
2019
2020 /* With INTx only one EQ is used */
2021 num_evts = event_handle(&adapter->eq_obj[0]);
2022 if (num_evts)
2023 return IRQ_HANDLED;
2024 else
2025 return IRQ_NONE;
2026 }
2027
2028 static irqreturn_t be_msix(int irq, void *dev)
2029 {
2030 struct be_eq_obj *eqo = dev;
2031
2032 event_handle(eqo);
2033 return IRQ_HANDLED;
2034 }
2035
2036 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2037 {
2038 return (rxcp->tcpf && !rxcp->err) ? true : false;
2039 }
2040
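/* Reap up to 'budget' RX completions on this RX object and replenish
 * the RX queue when it runs low on posted buffers.
 */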
2041 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2042 int budget)
2043 {
2044 struct be_adapter *adapter = rxo->adapter;
2045 struct be_queue_info *rx_cq = &rxo->cq;
2046 struct be_rx_compl_info *rxcp;
2047 u32 work_done;
2048
2049 for (work_done = 0; work_done < budget; work_done++) {
2050 rxcp = be_rx_compl_get(rxo);
2051 if (!rxcp)
2052 break;
2053
2054 /* Is it a flush compl that has no data? */
2055 if (unlikely(rxcp->num_rcvd == 0))
2056 goto loop_continue;
2057
2058 /* Discard compls with partial DMA (seen on Lancer B0) */
2059 if (unlikely(!rxcp->pkt_size)) {
2060 be_rx_compl_discard(rxo, rxcp);
2061 goto loop_continue;
2062 }
2063
2064 /* On BE drop pkts that arrive due to imperfect filtering in
2065 * promiscuous mode on some SKUs
2066 */
2067 if (unlikely(rxcp->port != adapter->port_num &&
2068 !lancer_chip(adapter))) {
2069 be_rx_compl_discard(rxo, rxcp);
2070 goto loop_continue;
2071 }
2072
2073 if (do_gro(rxcp))
2074 be_rx_compl_process_gro(rxo, napi, rxcp);
2075 else
2076 be_rx_compl_process(rxo, rxcp);
2077 loop_continue:
2078 be_rx_stats_update(rxo, rxcp);
2079 }
2080
2081 if (work_done) {
2082 be_cq_notify(adapter, rx_cq->id, true, work_done);
2083
2084 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2085 be_post_rx_frags(rxo, GFP_ATOMIC);
2086 }
2087
2088 return work_done;
2089 }
2090
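/* Reap up to 'budget' TX completions; wake the netdev sub-queue if it was
 * stopped for lack of wrbs. Returns true when all pending work was done.
 */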
2091 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2092 int budget, int idx)
2093 {
2094 struct be_eth_tx_compl *txcp;
2095 int num_wrbs = 0, work_done;
2096
2097 for (work_done = 0; work_done < budget; work_done++) {
2098 txcp = be_tx_compl_get(&txo->cq);
2099 if (!txcp)
2100 break;
2101 num_wrbs += be_tx_compl_process(adapter, txo,
2102 AMAP_GET_BITS(struct amap_eth_tx_compl,
2103 wrb_index, txcp));
2104 }
2105
2106 if (work_done) {
2107 be_cq_notify(adapter, txo->cq.id, true, work_done);
2108 atomic_sub(num_wrbs, &txo->q.used);
2109
2110 /* As Tx wrbs have been freed up, wake up netdev queue
2111 * if it was stopped due to lack of tx wrbs. */
2112 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2113 atomic_read(&txo->q.used) < txo->q.len / 2) {
2114 netif_wake_subqueue(adapter->netdev, idx);
2115 }
2116
2117 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2118 tx_stats(txo)->tx_compl += work_done;
2119 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2120 }
2121 return (work_done < budget); /* Done */
2122 }
2123
2124 int be_poll(struct napi_struct *napi, int budget)
2125 {
2126 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2127 struct be_adapter *adapter = eqo->adapter;
2128 int max_work = 0, work, i;
2129 bool tx_done;
2130
2131 /* Process all TXQs serviced by this EQ */
2132 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2133 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2134 eqo->tx_budget, i);
2135 if (!tx_done)
2136 max_work = budget;
2137 }
2138
2139 /* For EQ0 this loop iterates twice, so that completions of the last
2140 * RXQ (the default one) are also processed; for other EQs the loop
2141 * iterates only once.
2142 */
2143 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2144 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2145 max_work = max(work, max_work);
2146 }
2147
2148 if (is_mcc_eqo(eqo))
2149 be_process_mcc(adapter);
2150
2151 if (max_work < budget) {
2152 napi_complete(napi);
2153 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2154 } else {
2155 /* As we'll continue in polling mode, count and clear events */
2156 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2157 }
2158 return max_work;
2159 }
2160
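/* Check for adapter errors: SLIPORT status registers on Lancer, UE status
 * registers on BE2/BE3; log any error bits that are set.
 */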
2161 void be_detect_error(struct be_adapter *adapter)
2162 {
2163 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2164 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2165 u32 i;
2166
2167 if (be_crit_error(adapter))
2168 return;
2169
2170 if (lancer_chip(adapter)) {
2171 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2172 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2173 sliport_err1 = ioread32(adapter->db +
2174 SLIPORT_ERROR1_OFFSET);
2175 sliport_err2 = ioread32(adapter->db +
2176 SLIPORT_ERROR2_OFFSET);
2177 }
2178 } else {
2179 pci_read_config_dword(adapter->pdev,
2180 PCICFG_UE_STATUS_LOW, &ue_lo);
2181 pci_read_config_dword(adapter->pdev,
2182 PCICFG_UE_STATUS_HIGH, &ue_hi);
2183 pci_read_config_dword(adapter->pdev,
2184 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2185 pci_read_config_dword(adapter->pdev,
2186 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2187
2188 ue_lo = (ue_lo & ~ue_lo_mask);
2189 ue_hi = (ue_hi & ~ue_hi_mask);
2190 }
2191
2192 /* On certain platforms BE hardware can indicate spurious UEs.
2193 * In case of a real UE the h/w is allowed to stop working completely;
2194 * hence hw_error is not set on UE detection.
2195 */
2196 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2197 adapter->hw_error = true;
2198 dev_err(&adapter->pdev->dev,
2199 "Error detected in the card\n");
2200 }
2201
2202 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2203 dev_err(&adapter->pdev->dev,
2204 "ERR: sliport status 0x%x\n", sliport_status);
2205 dev_err(&adapter->pdev->dev,
2206 "ERR: sliport error1 0x%x\n", sliport_err1);
2207 dev_err(&adapter->pdev->dev,
2208 "ERR: sliport error2 0x%x\n", sliport_err2);
2209 }
2210
2211 if (ue_lo) {
2212 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2213 if (ue_lo & 1)
2214 dev_err(&adapter->pdev->dev,
2215 "UE: %s bit set\n", ue_status_low_desc[i]);
2216 }
2217 }
2218
2219 if (ue_hi) {
2220 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2221 if (ue_hi & 1)
2222 dev_err(&adapter->pdev->dev,
2223 "UE: %s bit set\n", ue_status_hi_desc[i]);
2224 }
2225 }
2226
2227 }
2228
2229 static void be_msix_disable(struct be_adapter *adapter)
2230 {
2231 if (msix_enabled(adapter)) {
2232 pci_disable_msix(adapter->pdev);
2233 adapter->num_msix_vec = 0;
2234 }
2235 }
2236
2237 static uint be_num_rss_want(struct be_adapter *adapter)
2238 {
2239 u32 num = 0;
2240
2241 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2242 (lancer_chip(adapter) ||
2243 (!sriov_want(adapter) && be_physfn(adapter)))) {
2244 num = adapter->max_rss_queues;
2245 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2246 }
2247 return num;
2248 }
2249
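/* Compute the number of MSI-X vectors needed (RSS rings plus optional RoCE
 * vectors) and enable MSI-X, retrying with fewer vectors if needed.
 */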
2250 static void be_msix_enable(struct be_adapter *adapter)
2251 {
2252 #define BE_MIN_MSIX_VECTORS 1
2253 int i, status, num_vec, num_roce_vec = 0;
2254 struct device *dev = &adapter->pdev->dev;
2255
2256 /* If RSS queues are not used, need a vec for default RX Q */
2257 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2258 if (be_roce_supported(adapter)) {
2259 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2260 (num_online_cpus() + 1));
2261 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2262 num_vec += num_roce_vec;
2263 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2264 }
2265 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2266
2267 for (i = 0; i < num_vec; i++)
2268 adapter->msix_entries[i].entry = i;
2269
2270 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2271 if (status == 0) {
2272 goto done;
2273 } else if (status >= BE_MIN_MSIX_VECTORS) {
2274 num_vec = status;
2275 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2276 num_vec) == 0)
2277 goto done;
2278 }
2279
2280 dev_warn(dev, "MSIx enable failed\n");
2281 return;
2282 done:
2283 if (be_roce_supported(adapter)) {
2284 if (num_vec > num_roce_vec) {
2285 adapter->num_msix_vec = num_vec - num_roce_vec;
2286 adapter->num_msix_roce_vec =
2287 num_vec - adapter->num_msix_vec;
2288 } else {
2289 adapter->num_msix_vec = num_vec;
2290 adapter->num_msix_roce_vec = 0;
2291 }
2292 } else
2293 adapter->num_msix_vec = num_vec;
2294 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2295 return;
2296 }
2297
2298 static inline int be_msix_vec_get(struct be_adapter *adapter,
2299 struct be_eq_obj *eqo)
2300 {
2301 return adapter->msix_entries[eqo->idx].vector;
2302 }
2303
2304 static int be_msix_register(struct be_adapter *adapter)
2305 {
2306 struct net_device *netdev = adapter->netdev;
2307 struct be_eq_obj *eqo;
2308 int status, i, vec;
2309
2310 for_all_evt_queues(adapter, eqo, i) {
2311 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2312 vec = be_msix_vec_get(adapter, eqo);
2313 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2314 if (status)
2315 goto err_msix;
2316 }
2317
2318 return 0;
2319 err_msix:
2320 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2321 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2322 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2323 status);
2324 be_msix_disable(adapter);
2325 return status;
2326 }
2327
2328 static int be_irq_register(struct be_adapter *adapter)
2329 {
2330 struct net_device *netdev = adapter->netdev;
2331 int status;
2332
2333 if (msix_enabled(adapter)) {
2334 status = be_msix_register(adapter);
2335 if (status == 0)
2336 goto done;
2337 /* INTx is not supported for VF */
2338 if (!be_physfn(adapter))
2339 return status;
2340 }
2341
2342 /* INTx */
2343 netdev->irq = adapter->pdev->irq;
2344 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2345 adapter);
2346 if (status) {
2347 dev_err(&adapter->pdev->dev,
2348 "INTx request IRQ failed - err %d\n", status);
2349 return status;
2350 }
2351 done:
2352 adapter->isr_registered = true;
2353 return 0;
2354 }
2355
2356 static void be_irq_unregister(struct be_adapter *adapter)
2357 {
2358 struct net_device *netdev = adapter->netdev;
2359 struct be_eq_obj *eqo;
2360 int i;
2361
2362 if (!adapter->isr_registered)
2363 return;
2364
2365 /* INTx */
2366 if (!msix_enabled(adapter)) {
2367 free_irq(netdev->irq, adapter);
2368 goto done;
2369 }
2370
2371 /* MSIx */
2372 for_all_evt_queues(adapter, eqo, i)
2373 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2374
2375 done:
2376 adapter->isr_registered = false;
2377 }
2378
2379 static void be_rx_qs_destroy(struct be_adapter *adapter)
2380 {
2381 struct be_queue_info *q;
2382 struct be_rx_obj *rxo;
2383 int i;
2384
2385 for_all_rx_queues(adapter, rxo, i) {
2386 q = &rxo->q;
2387 if (q->created) {
2388 be_cmd_rxq_destroy(adapter, q);
2389 /* After the rxq is invalidated, wait for a grace time
2390 * of 1ms for all dma to end and the flush compl to
2391 * arrive
2392 */
2393 mdelay(1);
2394 be_rx_cq_clean(rxo);
2395 }
2396 be_queue_free(adapter, q);
2397 }
2398 }
2399
2400 static int be_close(struct net_device *netdev)
2401 {
2402 struct be_adapter *adapter = netdev_priv(netdev);
2403 struct be_eq_obj *eqo;
2404 int i;
2405
2406 be_roce_dev_close(adapter);
2407
2408 be_async_mcc_disable(adapter);
2409
2410 if (!lancer_chip(adapter))
2411 be_intr_set(adapter, false);
2412
2413 for_all_evt_queues(adapter, eqo, i) {
2414 napi_disable(&eqo->napi);
2415 if (msix_enabled(adapter))
2416 synchronize_irq(be_msix_vec_get(adapter, eqo));
2417 else
2418 synchronize_irq(netdev->irq);
2419 be_eq_clean(eqo);
2420 }
2421
2422 be_irq_unregister(adapter);
2423
2424 /* Wait for all pending tx completions to arrive so that
2425 * all tx skbs are freed.
2426 */
2427 be_tx_compl_clean(adapter);
2428
2429 be_rx_qs_destroy(adapter);
2430 return 0;
2431 }
2432
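/* Create the RX queues (the default RXQ first, then the RSS rings),
 * program the RSS indirection table and post the initial RX buffers.
 */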
2433 static int be_rx_qs_create(struct be_adapter *adapter)
2434 {
2435 struct be_rx_obj *rxo;
2436 int rc, i, j;
2437 u8 rsstable[128];
2438
2439 for_all_rx_queues(adapter, rxo, i) {
2440 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2441 sizeof(struct be_eth_rx_d));
2442 if (rc)
2443 return rc;
2444 }
2445
2446 /* The FW would like the default RXQ to be created first */
2447 rxo = default_rxo(adapter);
2448 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2449 adapter->if_handle, false, &rxo->rss_id);
2450 if (rc)
2451 return rc;
2452
2453 for_all_rss_queues(adapter, rxo, i) {
2454 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2455 rx_frag_size, adapter->if_handle,
2456 true, &rxo->rss_id);
2457 if (rc)
2458 return rc;
2459 }
2460
2461 if (be_multi_rxq(adapter)) {
2462 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2463 for_all_rss_queues(adapter, rxo, i) {
2464 if ((j + i) >= 128)
2465 break;
2466 rsstable[j + i] = rxo->rss_id;
2467 }
2468 }
2469 rc = be_cmd_rss_config(adapter, rsstable, 128);
2470 if (rc)
2471 return rc;
2472 }
2473
2474 /* First time posting */
2475 for_all_rx_queues(adapter, rxo, i)
2476 be_post_rx_frags(rxo, GFP_KERNEL);
2477 return 0;
2478 }
2479
2480 static int be_open(struct net_device *netdev)
2481 {
2482 struct be_adapter *adapter = netdev_priv(netdev);
2483 struct be_eq_obj *eqo;
2484 struct be_rx_obj *rxo;
2485 struct be_tx_obj *txo;
2486 u8 link_status;
2487 int status, i;
2488
2489 status = be_rx_qs_create(adapter);
2490 if (status)
2491 goto err;
2492
2493 be_irq_register(adapter);
2494
2495 if (!lancer_chip(adapter))
2496 be_intr_set(adapter, true);
2497
2498 for_all_rx_queues(adapter, rxo, i)
2499 be_cq_notify(adapter, rxo->cq.id, true, 0);
2500
2501 for_all_tx_queues(adapter, txo, i)
2502 be_cq_notify(adapter, txo->cq.id, true, 0);
2503
2504 be_async_mcc_enable(adapter);
2505
2506 for_all_evt_queues(adapter, eqo, i) {
2507 napi_enable(&eqo->napi);
2508 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2509 }
2510
2511 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2512 if (!status)
2513 be_link_status_update(adapter, link_status);
2514
2515 be_roce_dev_open(adapter);
2516 return 0;
2517 err:
2518 be_close(adapter->netdev);
2519 return -EIO;
2520 }
2521
2522 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2523 {
2524 struct be_dma_mem cmd;
2525 int status = 0;
2526 u8 mac[ETH_ALEN];
2527
2528 memset(mac, 0, ETH_ALEN);
2529
2530 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2531 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2532 GFP_KERNEL);
2533 if (cmd.va == NULL)
2534 return -1;
2535 memset(cmd.va, 0, cmd.size);
2536
2537 if (enable) {
2538 status = pci_write_config_dword(adapter->pdev,
2539 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2540 if (status) {
2541 dev_err(&adapter->pdev->dev,
2542 "Could not enable Wake-on-lan\n");
2543 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2544 cmd.dma);
2545 return status;
2546 }
2547 status = be_cmd_enable_magic_wol(adapter,
2548 adapter->netdev->dev_addr, &cmd);
2549 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2550 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2551 } else {
2552 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2553 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2554 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2555 }
2556
2557 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2558 return status;
2559 }
2560
2561 /*
2562 * Generate a seed MAC address from the PF MAC Address using jhash.
2563 * MAC addresses for VFs are assigned incrementally starting from the seed.
2564 * These addresses are programmed in the ASIC by the PF and the VF driver
2565 * queries for the MAC address during its probe.
2566 */
2567 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2568 {
2569 u32 vf;
2570 int status = 0;
2571 u8 mac[ETH_ALEN];
2572 struct be_vf_cfg *vf_cfg;
2573
2574 be_vf_eth_addr_generate(adapter, mac);
2575
2576 for_all_vfs(adapter, vf_cfg, vf) {
2577 if (lancer_chip(adapter)) {
2578 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2579 } else {
2580 status = be_cmd_pmac_add(adapter, mac,
2581 vf_cfg->if_handle,
2582 &vf_cfg->pmac_id, vf + 1);
2583 }
2584
2585 if (status)
2586 dev_err(&adapter->pdev->dev,
2587 "Mac address assignment failed for VF %d\n", vf);
2588 else
2589 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2590
2591 mac[5] += 1;
2592 }
2593 return status;
2594 }
2595
2596 static void be_vf_clear(struct be_adapter *adapter)
2597 {
2598 struct be_vf_cfg *vf_cfg;
2599 u32 vf;
2600
2601 if (be_find_vfs(adapter, ASSIGNED)) {
2602 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2603 goto done;
2604 }
2605
2606 for_all_vfs(adapter, vf_cfg, vf) {
2607 if (lancer_chip(adapter))
2608 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2609 else
2610 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2611 vf_cfg->pmac_id, vf + 1);
2612
2613 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2614 }
2615 pci_disable_sriov(adapter->pdev);
2616 done:
2617 kfree(adapter->vf_cfg);
2618 adapter->num_vfs = 0;
2619 }
2620
2621 static int be_clear(struct be_adapter *adapter)
2622 {
2623 int i = 1;
2624
2625 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2626 cancel_delayed_work_sync(&adapter->work);
2627 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2628 }
2629
2630 if (sriov_enabled(adapter))
2631 be_vf_clear(adapter);
2632
2633 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2634 be_cmd_pmac_del(adapter, adapter->if_handle,
2635 adapter->pmac_id[i], 0);
2636
2637 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2638
2639 be_mcc_queues_destroy(adapter);
2640 be_rx_cqs_destroy(adapter);
2641 be_tx_queues_destroy(adapter);
2642 be_evt_queues_destroy(adapter);
2643
2644 kfree(adapter->pmac_id);
2645 adapter->pmac_id = NULL;
2646
2647 be_msix_disable(adapter);
2648 return 0;
2649 }
2650
2651 static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2652 u32 *cap_flags, u8 domain)
2653 {
2654 bool profile_present = false;
2655 int status;
2656
2657 if (lancer_chip(adapter)) {
2658 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2659 if (!status)
2660 profile_present = true;
2661 }
2662
2663 if (!profile_present)
2664 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2665 BE_IF_FLAGS_MULTICAST;
2666 }
2667
2668 static int be_vf_setup_init(struct be_adapter *adapter)
2669 {
2670 struct be_vf_cfg *vf_cfg;
2671 int vf;
2672
2673 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2674 GFP_KERNEL);
2675 if (!adapter->vf_cfg)
2676 return -ENOMEM;
2677
2678 for_all_vfs(adapter, vf_cfg, vf) {
2679 vf_cfg->if_handle = -1;
2680 vf_cfg->pmac_id = -1;
2681 }
2682 return 0;
2683 }
2684
2685 static int be_vf_setup(struct be_adapter *adapter)
2686 {
2687 struct be_vf_cfg *vf_cfg;
2688 struct device *dev = &adapter->pdev->dev;
2689 u32 cap_flags, en_flags, vf;
2690 u16 def_vlan, lnk_speed;
2691 int status, enabled_vfs;
2692
2693 enabled_vfs = be_find_vfs(adapter, ENABLED);
2694 if (enabled_vfs) {
2695 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2696 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2697 return 0;
2698 }
2699
2700 if (num_vfs > adapter->dev_num_vfs) {
2701 dev_warn(dev, "Device supports %d VFs and not %d\n",
2702 adapter->dev_num_vfs, num_vfs);
2703 num_vfs = adapter->dev_num_vfs;
2704 }
2705
2706 status = pci_enable_sriov(adapter->pdev, num_vfs);
2707 if (!status) {
2708 adapter->num_vfs = num_vfs;
2709 } else {
2710 /* Platform doesn't support SRIOV though device supports it */
2711 dev_warn(dev, "SRIOV enable failed\n");
2712 return 0;
2713 }
2714
2715 status = be_vf_setup_init(adapter);
2716 if (status)
2717 goto err;
2718
2719 for_all_vfs(adapter, vf_cfg, vf) {
2720 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2721
2722 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2723 BE_IF_FLAGS_BROADCAST |
2724 BE_IF_FLAGS_MULTICAST);
2725
2726 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2727 &vf_cfg->if_handle, vf + 1);
2728 if (status)
2729 goto err;
2730 }
2731
2732 if (!enabled_vfs) {
2733 status = be_vf_eth_addr_config(adapter);
2734 if (status)
2735 goto err;
2736 }
2737
2738 for_all_vfs(adapter, vf_cfg, vf) {
2739 lnk_speed = 1000;
2740 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
2741 if (status)
2742 goto err;
2743 vf_cfg->tx_rate = lnk_speed * 10;
2744
2745 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2746 vf + 1, vf_cfg->if_handle);
2747 if (status)
2748 goto err;
2749 vf_cfg->def_vid = def_vlan;
2750
2751 be_cmd_enable_vf(adapter, vf + 1);
2752 }
2753 return 0;
2754 err:
2755 return status;
2756 }
2757
2758 static void be_setup_init(struct be_adapter *adapter)
2759 {
2760 adapter->vlan_prio_bmap = 0xff;
2761 adapter->phy.link_speed = -1;
2762 adapter->if_handle = -1;
2763 adapter->be3_native = false;
2764 adapter->promiscuous = false;
2765 adapter->eq_next_idx = 0;
2766
2767 if (be_physfn(adapter))
2768 adapter->cmd_privileges = MAX_PRIVILEGES;
2769 else
2770 adapter->cmd_privileges = MIN_PRIVILEGES;
2771 }
2772
2773 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2774 bool *active_mac, u32 *pmac_id)
2775 {
2776 int status = 0;
2777
2778 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2779 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2780 if (!lancer_chip(adapter) && !be_physfn(adapter))
2781 *active_mac = true;
2782 else
2783 *active_mac = false;
2784
2785 return status;
2786 }
2787
2788 if (lancer_chip(adapter)) {
2789 status = be_cmd_get_mac_from_list(adapter, mac,
2790 active_mac, pmac_id, 0);
2791 if (*active_mac) {
2792 status = be_cmd_mac_addr_query(adapter, mac, false,
2793 if_handle, *pmac_id);
2794 }
2795 } else if (be_physfn(adapter)) {
2796 /* For BE3, for PF get permanent MAC */
2797 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2798 *active_mac = false;
2799 } else {
2800 /* For BE3, for VF get soft MAC assigned by PF */
2801 status = be_cmd_mac_addr_query(adapter, mac, false,
2802 if_handle, 0);
2803 *active_mac = true;
2804 }
2805 return status;
2806 }
2807
2808 static void be_get_resources(struct be_adapter *adapter)
2809 {
2810 int status;
2811 bool profile_present = false;
2812
2813 if (lancer_chip(adapter)) {
2814 status = be_cmd_get_func_config(adapter);
2815
2816 if (!status)
2817 profile_present = true;
2818 }
2819
2820 if (profile_present) {
2821 /* Sanity fixes for Lancer */
2822 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2823 BE_UC_PMAC_COUNT);
2824 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2825 BE_NUM_VLANS_SUPPORTED);
2826 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2827 BE_MAX_MC);
2828 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2829 MAX_TX_QS);
2830 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2831 BE3_MAX_RSS_QS);
2832 adapter->max_event_queues = min_t(u16,
2833 adapter->max_event_queues,
2834 BE3_MAX_RSS_QS);
2835
2836 if (adapter->max_rss_queues &&
2837 adapter->max_rss_queues == adapter->max_rx_queues)
2838 adapter->max_rss_queues -= 1;
2839
2840 if (adapter->max_event_queues < adapter->max_rss_queues)
2841 adapter->max_rss_queues = adapter->max_event_queues;
2842
2843 } else {
2844 if (be_physfn(adapter))
2845 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2846 else
2847 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2848
2849 if (adapter->function_mode & FLEX10_MODE)
2850 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2851 else
2852 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2853
2854 adapter->max_mcast_mac = BE_MAX_MC;
2855 adapter->max_tx_queues = MAX_TX_QS;
2856 adapter->max_rss_queues = (adapter->be3_native) ?
2857 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2858 adapter->max_event_queues = BE3_MAX_RSS_QS;
2859
2860 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2861 BE_IF_FLAGS_BROADCAST |
2862 BE_IF_FLAGS_MULTICAST |
2863 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2864 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2865 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2866 BE_IF_FLAGS_PROMISCUOUS;
2867
2868 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2869 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2870 }
2871 }
2872
2873 /* Routine to query per-function resource limits */
2874 static int be_get_config(struct be_adapter *adapter)
2875 {
2876 int pos, status;
2877 u16 dev_num_vfs;
2878
2879 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2880 &adapter->function_mode,
2881 &adapter->function_caps);
2882 if (status)
2883 goto err;
2884
2885 be_get_resources(adapter);
2886
2887 /* primary mac needs 1 pmac entry */
2888 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2889 sizeof(u32), GFP_KERNEL);
2890 if (!adapter->pmac_id) {
2891 status = -ENOMEM;
2892 goto err;
2893 }
2894
2895 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2896 if (pos) {
2897 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2898 &dev_num_vfs);
2899 if (!lancer_chip(adapter))
2900 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2901 adapter->dev_num_vfs = dev_num_vfs;
2902 }
2903 err:
2904 return status;
2905 }
2906
2907 static int be_setup(struct be_adapter *adapter)
2908 {
2909 struct device *dev = &adapter->pdev->dev;
2910 u32 en_flags;
2911 u32 tx_fc, rx_fc;
2912 int status;
2913 u8 mac[ETH_ALEN];
2914 bool active_mac;
2915
2916 be_setup_init(adapter);
2917
2918 if (!lancer_chip(adapter))
2919 be_cmd_req_native_mode(adapter);
2920
2921 status = be_get_config(adapter);
2922 if (status)
2923 goto err;
2924
2925 be_msix_enable(adapter);
2926
2927 status = be_evt_queues_create(adapter);
2928 if (status)
2929 goto err;
2930
2931 status = be_tx_cqs_create(adapter);
2932 if (status)
2933 goto err;
2934
2935 status = be_rx_cqs_create(adapter);
2936 if (status)
2937 goto err;
2938
2939 status = be_mcc_queues_create(adapter);
2940 if (status)
2941 goto err;
2942
2943 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
2944 /* In UMC mode the FW does not return the right privileges.
2945 * Override them with privileges equivalent to the PF's.
2946 */
2947 if (be_is_mc(adapter))
2948 adapter->cmd_privileges = MAX_PRIVILEGES;
2949
2950 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2951 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2952
2953 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2954 en_flags |= BE_IF_FLAGS_RSS;
2955
2956 en_flags = en_flags & adapter->if_cap_flags;
2957
2958 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
2959 &adapter->if_handle, 0);
2960 if (status != 0)
2961 goto err;
2962
2963 memset(mac, 0, ETH_ALEN);
2964 active_mac = false;
2965 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2966 &active_mac, &adapter->pmac_id[0]);
2967 if (status != 0)
2968 goto err;
2969
2970 if (!active_mac) {
2971 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2972 &adapter->pmac_id[0], 0);
2973 if (status != 0)
2974 goto err;
2975 }
2976
2977 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2978 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2979 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2980 }
2981
2982 status = be_tx_qs_create(adapter);
2983 if (status)
2984 goto err;
2985
2986 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2987
2988 if (adapter->vlans_added)
2989 be_vid_config(adapter);
2990
2991 be_set_rx_mode(adapter->netdev);
2992
2993 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2994
2995 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2996 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2997 adapter->rx_fc);
2998
2999 if (be_physfn(adapter) && num_vfs) {
3000 if (adapter->dev_num_vfs)
3001 be_vf_setup(adapter);
3002 else
3003 dev_warn(dev, "device doesn't support SRIOV\n");
3004 }
3005
3006 status = be_cmd_get_phy_info(adapter);
3007 if (!status && be_pause_supported(adapter))
3008 adapter->phy.fc_autoneg = 1;
3009
3010 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3011 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3012 return 0;
3013 err:
3014 be_clear(adapter);
3015 return status;
3016 }
3017
3018 #ifdef CONFIG_NET_POLL_CONTROLLER
3019 static void be_netpoll(struct net_device *netdev)
3020 {
3021 struct be_adapter *adapter = netdev_priv(netdev);
3022 struct be_eq_obj *eqo;
3023 int i;
3024
3025 for_all_evt_queues(adapter, eqo, i)
3026 event_handle(eqo);
3027
3028 return;
3029 }
3030 #endif
3031
3032 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3033 char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3034
3035 static bool be_flash_redboot(struct be_adapter *adapter,
3036 const u8 *p, u32 img_start, int image_size,
3037 int hdr_size)
3038 {
3039 u32 crc_offset;
3040 u8 flashed_crc[4];
3041 int status;
3042
3043 crc_offset = hdr_size + img_start + image_size - 4;
3044
3045 p += crc_offset;
3046
3047 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3048 (image_size - 4));
3049 if (status) {
3050 dev_err(&adapter->pdev->dev,
3051 "could not get crc from flash, not flashing redboot\n");
3052 return false;
3053 }
3054
3055 /* update redboot only if crc does not match */
3056 if (!memcmp(flashed_crc, p, 4))
3057 return false;
3058 else
3059 return true;
3060 }
3061
3062 static bool phy_flashing_required(struct be_adapter *adapter)
3063 {
3064 return (adapter->phy.phy_type == TN_8022 &&
3065 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3066 }
3067
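/* Check whether a flash component of the given type is present in the UFI
 * flash section.
 */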
3068 static bool is_comp_in_ufi(struct be_adapter *adapter,
3069 struct flash_section_info *fsec, int type)
3070 {
3071 int i = 0, img_type = 0;
3072 struct flash_section_info_g2 *fsec_g2 = NULL;
3073
3074 if (BE2_chip(adapter))
3075 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3076
3077 for (i = 0; i < MAX_FLASH_COMP; i++) {
3078 if (fsec_g2)
3079 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3080 else
3081 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3082
3083 if (img_type == type)
3084 return true;
3085 }
3086 return false;
3087
3088 }
3089
3090 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3091 int header_size,
3092 const struct firmware *fw)
3093 {
3094 struct flash_section_info *fsec = NULL;
3095 const u8 *p = fw->data;
3096
3097 p += header_size;
3098 while (p < (fw->data + fw->size)) {
3099 fsec = (struct flash_section_info *)p;
3100 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3101 return fsec;
3102 p += 32;
3103 }
3104 return NULL;
3105 }
3106
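/* Write an image to the adapter flash in chunks of up to 32KB */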
3107 static int be_flash(struct be_adapter *adapter, const u8 *img,
3108 struct be_dma_mem *flash_cmd, int optype, int img_size)
3109 {
3110 u32 total_bytes = 0, flash_op, num_bytes = 0;
3111 int status = 0;
3112 struct be_cmd_write_flashrom *req = flash_cmd->va;
3113
3114 total_bytes = img_size;
3115 while (total_bytes) {
3116 num_bytes = min_t(u32, 32*1024, total_bytes);
3117
3118 total_bytes -= num_bytes;
3119
3120 if (!total_bytes) {
3121 if (optype == OPTYPE_PHY_FW)
3122 flash_op = FLASHROM_OPER_PHY_FLASH;
3123 else
3124 flash_op = FLASHROM_OPER_FLASH;
3125 } else {
3126 if (optype == OPTYPE_PHY_FW)
3127 flash_op = FLASHROM_OPER_PHY_SAVE;
3128 else
3129 flash_op = FLASHROM_OPER_SAVE;
3130 }
3131
3132 memcpy(req->data_buf, img, num_bytes);
3133 img += num_bytes;
3134 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3135 flash_op, num_bytes);
3136 if (status) {
3137 if (status == ILLEGAL_IOCTL_REQ &&
3138 optype == OPTYPE_PHY_FW)
3139 break;
3140 dev_err(&adapter->pdev->dev,
3141 "cmd to write to flash rom failed.\n");
3142 return status;
3143 }
3144 }
3145 return 0;
3146 }
3147
3148 /* For BE2 and BE3 */
3149 static int be_flash_BEx(struct be_adapter *adapter,
3150 const struct firmware *fw,
3151 struct be_dma_mem *flash_cmd,
3152 int num_of_images)
3153
3154 {
3155 int status = 0, i, filehdr_size = 0;
3156 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3157 const u8 *p = fw->data;
3158 const struct flash_comp *pflashcomp;
3159 int num_comp, redboot;
3160 struct flash_section_info *fsec = NULL;
3161
3162 struct flash_comp gen3_flash_types[] = {
3163 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3164 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3165 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3166 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3167 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3168 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3169 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3170 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3171 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3172 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3173 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3174 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3175 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3176 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3177 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3178 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3179 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3180 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3181 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3182 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3183 };
3184
3185 struct flash_comp gen2_flash_types[] = {
3186 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3187 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3188 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3189 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3190 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3191 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3192 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3193 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3194 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3195 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3196 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3197 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3198 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3199 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3200 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3201 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3202 };
3203
3204 if (BE3_chip(adapter)) {
3205 pflashcomp = gen3_flash_types;
3206 filehdr_size = sizeof(struct flash_file_hdr_g3);
3207 num_comp = ARRAY_SIZE(gen3_flash_types);
3208 } else {
3209 pflashcomp = gen2_flash_types;
3210 filehdr_size = sizeof(struct flash_file_hdr_g2);
3211 num_comp = ARRAY_SIZE(gen2_flash_types);
3212 }
3213
3214 /* Get flash section info */
3215 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3216 if (!fsec) {
3217 dev_err(&adapter->pdev->dev,
3218 "Invalid Cookie. UFI corrupted ?\n");
3219 return -1;
3220 }
3221 for (i = 0; i < num_comp; i++) {
3222 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3223 continue;
3224
3225 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3226 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3227 continue;
3228
3229 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3230 !phy_flashing_required(adapter))
3231 continue;
3232
3233 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3234 redboot = be_flash_redboot(adapter, fw->data,
3235 pflashcomp[i].offset, pflashcomp[i].size,
3236 filehdr_size + img_hdrs_size);
3237 if (!redboot)
3238 continue;
3239 }
3240
3241 p = fw->data;
3242 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3243 if (p + pflashcomp[i].size > fw->data + fw->size)
3244 return -1;
3245
3246 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3247 pflashcomp[i].size);
3248 if (status) {
3249 dev_err(&adapter->pdev->dev,
3250 "Flashing section type %d failed.\n",
3251 pflashcomp[i].img_type);
3252 return status;
3253 }
3254 }
3255 return 0;
3256 }
3257
3258 static int be_flash_skyhawk(struct be_adapter *adapter,
3259 const struct firmware *fw,
3260 struct be_dma_mem *flash_cmd, int num_of_images)
3261 {
3262 int status = 0, i, filehdr_size = 0;
3263 int img_offset, img_size, img_optype, redboot;
3264 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3265 const u8 *p = fw->data;
3266 struct flash_section_info *fsec = NULL;
3267
3268 filehdr_size = sizeof(struct flash_file_hdr_g3);
3269 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3270 if (!fsec) {
3271 dev_err(&adapter->pdev->dev,
3272 "Invalid Cookie. UFI corrupted ?\n");
3273 return -1;
3274 }
3275
3276 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3277 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3278 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3279
3280 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3281 case IMAGE_FIRMWARE_iSCSI:
3282 img_optype = OPTYPE_ISCSI_ACTIVE;
3283 break;
3284 case IMAGE_BOOT_CODE:
3285 img_optype = OPTYPE_REDBOOT;
3286 break;
3287 case IMAGE_OPTION_ROM_ISCSI:
3288 img_optype = OPTYPE_BIOS;
3289 break;
3290 case IMAGE_OPTION_ROM_PXE:
3291 img_optype = OPTYPE_PXE_BIOS;
3292 break;
3293 case IMAGE_OPTION_ROM_FCoE:
3294 img_optype = OPTYPE_FCOE_BIOS;
3295 break;
3296 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3297 img_optype = OPTYPE_ISCSI_BACKUP;
3298 break;
3299 case IMAGE_NCSI:
3300 img_optype = OPTYPE_NCSI_FW;
3301 break;
3302 default:
3303 continue;
3304 }
3305
3306 if (img_optype == OPTYPE_REDBOOT) {
3307 redboot = be_flash_redboot(adapter, fw->data,
3308 img_offset, img_size,
3309 filehdr_size + img_hdrs_size);
3310 if (!redboot)
3311 continue;
3312 }
3313
3314 p = fw->data;
3315 p += filehdr_size + img_offset + img_hdrs_size;
3316 if (p + img_size > fw->data + fw->size)
3317 return -1;
3318
3319 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3320 if (status) {
3321 dev_err(&adapter->pdev->dev,
3322 "Flashing section type %d failed.\n",
3323 fsec->fsec_entry[i].type);
3324 return status;
3325 }
3326 }
3327 return 0;
3328 }
3329
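/* Wait (up to 30 seconds) for the Lancer SLI port to become idle */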
3330 static int lancer_wait_idle(struct be_adapter *adapter)
3331 {
3332 #define SLIPORT_IDLE_TIMEOUT 30
3333 u32 reg_val;
3334 int status = 0, i;
3335
3336 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3337 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3338 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3339 break;
3340
3341 ssleep(1);
3342 }
3343
3344 if (i == SLIPORT_IDLE_TIMEOUT)
3345 status = -1;
3346
3347 return status;
3348 }
3349
3350 static int lancer_fw_reset(struct be_adapter *adapter)
3351 {
3352 int status = 0;
3353
3354 status = lancer_wait_idle(adapter);
3355 if (status)
3356 return status;
3357
3358 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3359 PHYSDEV_CONTROL_OFFSET);
3360
3361 return status;
3362 }
3363
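/* Download a FW image to a Lancer adapter in 32KB chunks via the
 * write_object command and trigger a FW reset if the FW requires one.
 */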
3364 static int lancer_fw_download(struct be_adapter *adapter,
3365 const struct firmware *fw)
3366 {
3367 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3368 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3369 struct be_dma_mem flash_cmd;
3370 const u8 *data_ptr = NULL;
3371 u8 *dest_image_ptr = NULL;
3372 size_t image_size = 0;
3373 u32 chunk_size = 0;
3374 u32 data_written = 0;
3375 u32 offset = 0;
3376 int status = 0;
3377 u8 add_status = 0;
3378 u8 change_status;
3379
3380 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3381 dev_err(&adapter->pdev->dev,
3382 "FW Image not properly aligned. "
3383 "Length must be 4 byte aligned.\n");
3384 status = -EINVAL;
3385 goto lancer_fw_exit;
3386 }
3387
3388 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3389 + LANCER_FW_DOWNLOAD_CHUNK;
3390 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3391 &flash_cmd.dma, GFP_KERNEL);
3392 if (!flash_cmd.va) {
3393 status = -ENOMEM;
3394 dev_err(&adapter->pdev->dev,
3395 "Memory allocation failure while flashing\n");
3396 goto lancer_fw_exit;
3397 }
3398
3399 dest_image_ptr = flash_cmd.va +
3400 sizeof(struct lancer_cmd_req_write_object);
3401 image_size = fw->size;
3402 data_ptr = fw->data;
3403
3404 while (image_size) {
3405 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3406
3407 /* Copy the image chunk content. */
3408 memcpy(dest_image_ptr, data_ptr, chunk_size);
3409
3410 status = lancer_cmd_write_object(adapter, &flash_cmd,
3411 chunk_size, offset,
3412 LANCER_FW_DOWNLOAD_LOCATION,
3413 &data_written, &change_status,
3414 &add_status);
3415 if (status)
3416 break;
3417
3418 offset += data_written;
3419 data_ptr += data_written;
3420 image_size -= data_written;
3421 }
3422
3423 if (!status) {
3424 /* Commit the written FW */
3425 status = lancer_cmd_write_object(adapter, &flash_cmd,
3426 0, offset,
3427 LANCER_FW_DOWNLOAD_LOCATION,
3428 &data_written, &change_status,
3429 &add_status);
3430 }
3431
3432 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3433 flash_cmd.dma);
3434 if (status) {
3435 dev_err(&adapter->pdev->dev,
3436 "Firmware load error. "
3437 "Status code: 0x%x Additional Status: 0x%x\n",
3438 status, add_status);
3439 goto lancer_fw_exit;
3440 }
3441
3442 if (change_status == LANCER_FW_RESET_NEEDED) {
3443 status = lancer_fw_reset(adapter);
3444 if (status) {
3445 dev_err(&adapter->pdev->dev,
3446 "Adapter busy for FW reset.\n"
3447 "New FW will not be active.\n");
3448 goto lancer_fw_exit;
3449 }
3450 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3451 dev_err(&adapter->pdev->dev,
3452 "System reboot required for new FW"
3453 " to be active\n");
3454 }
3455
3456 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3457 lancer_fw_exit:
3458 return status;
3459 }
3460
3461 #define UFI_TYPE2 2
3462 #define UFI_TYPE3 3
3463 #define UFI_TYPE4 4
3464 static int be_get_ufi_type(struct be_adapter *adapter,
3465 struct flash_file_hdr_g2 *fhdr)
3466 {
3467 if (fhdr == NULL)
3468 goto be_get_ufi_exit;
3469
3470 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3471 return UFI_TYPE4;
3472 else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3473 return UFI_TYPE3;
3474 else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3475 return UFI_TYPE2;
3476
3477 be_get_ufi_exit:
3478 dev_err(&adapter->pdev->dev,
3479 "UFI and Interface are not compatible for flashing\n");
3480 return -1;
3481 }
3482
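/* Flash a UFI firmware image on BE2/BE3/Skyhawk adapters */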
3483 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3484 {
3485 struct flash_file_hdr_g2 *fhdr;
3486 struct flash_file_hdr_g3 *fhdr3;
3487 struct image_hdr *img_hdr_ptr = NULL;
3488 struct be_dma_mem flash_cmd;
3489 const u8 *p;
3490 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3491
3492 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3493 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3494 &flash_cmd.dma, GFP_KERNEL);
3495 if (!flash_cmd.va) {
3496 status = -ENOMEM;
3497 dev_err(&adapter->pdev->dev,
3498 "Memory allocation failure while flashing\n");
3499 goto be_fw_exit;
3500 }
3501
3502 p = fw->data;
3503 fhdr = (struct flash_file_hdr_g2 *)p;
3504
3505 ufi_type = be_get_ufi_type(adapter, fhdr);
3506
3507 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3508 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3509 for (i = 0; i < num_imgs; i++) {
3510 img_hdr_ptr = (struct image_hdr *)(fw->data +
3511 (sizeof(struct flash_file_hdr_g3) +
3512 i * sizeof(struct image_hdr)));
3513 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3514 if (ufi_type == UFI_TYPE4)
3515 status = be_flash_skyhawk(adapter, fw,
3516 &flash_cmd, num_imgs);
3517 else if (ufi_type == UFI_TYPE3)
3518 status = be_flash_BEx(adapter, fw, &flash_cmd,
3519 num_imgs);
3520 }
3521 }
3522
3523 if (ufi_type == UFI_TYPE2)
3524 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3525 else if (ufi_type == -1)
3526 status = -1;
3527
3528 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3529 flash_cmd.dma);
3530 if (status) {
3531 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3532 goto be_fw_exit;
3533 }
3534
3535 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3536
3537 be_fw_exit:
3538 return status;
3539 }
3540
3541 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3542 {
3543 const struct firmware *fw;
3544 int status;
3545
3546 if (!netif_running(adapter->netdev)) {
3547 dev_err(&adapter->pdev->dev,
3548 "Firmware load not allowed (interface is down)\n");
3549 return -1;
3550 }
3551
3552 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3553 if (status)
3554 goto fw_exit;
3555
3556 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3557
3558 if (lancer_chip(adapter))
3559 status = lancer_fw_download(adapter, fw);
3560 else
3561 status = be_fw_download(adapter, fw);
3562
3563 fw_exit:
3564 release_firmware(fw);
3565 return status;
3566 }
3567
3568 static const struct net_device_ops be_netdev_ops = {
3569 .ndo_open = be_open,
3570 .ndo_stop = be_close,
3571 .ndo_start_xmit = be_xmit,
3572 .ndo_set_rx_mode = be_set_rx_mode,
3573 .ndo_set_mac_address = be_mac_addr_set,
3574 .ndo_change_mtu = be_change_mtu,
3575 .ndo_get_stats64 = be_get_stats64,
3576 .ndo_validate_addr = eth_validate_addr,
3577 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3578 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3579 .ndo_set_vf_mac = be_set_vf_mac,
3580 .ndo_set_vf_vlan = be_set_vf_vlan,
3581 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3582 .ndo_get_vf_config = be_get_vf_config,
3583 #ifdef CONFIG_NET_POLL_CONTROLLER
3584 .ndo_poll_controller = be_netpoll,
3585 #endif
3586 };
3587
3588 static void be_netdev_init(struct net_device *netdev)
3589 {
3590 struct be_adapter *adapter = netdev_priv(netdev);
3591 struct be_eq_obj *eqo;
3592 int i;
3593
3594 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3595 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3596 NETIF_F_HW_VLAN_TX;
3597 if (be_multi_rxq(adapter))
3598 netdev->hw_features |= NETIF_F_RXHASH;
3599
3600 netdev->features |= netdev->hw_features |
3601 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3602
3603 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3604 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3605
3606 netdev->priv_flags |= IFF_UNICAST_FLT;
3607
3608 netdev->flags |= IFF_MULTICAST;
3609
3610 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3611
3612 netdev->netdev_ops = &be_netdev_ops;
3613
3614 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3615
3616 for_all_evt_queues(adapter, eqo, i)
3617 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3618 }
3619
3620 static void be_unmap_pci_bars(struct be_adapter *adapter)
3621 {
3622 if (adapter->db)
3623 pci_iounmap(adapter->pdev, adapter->db);
3624 if (adapter->roce_db.base)
3625 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3626 }
3627
3628 static int db_bar(struct be_adapter *adapter)
3629 {
3630 if (lancer_chip(adapter) || !be_physfn(adapter))
3631 return 0;
3632 else
3633 return 4;
3634 }
3635
3636 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3637 {
3638 struct pci_dev *pdev = adapter->pdev;
3639 u8 __iomem *addr;
3640
3641 if (lancer_chip(adapter) && adapter->if_type == SLI_INTF_TYPE_3) {
3642 addr = pci_iomap(pdev, 2, 0);
3643 if (addr == NULL)
3644 return -ENOMEM;
3645
3646 adapter->roce_db.base = addr;
3647 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3648 adapter->roce_db.size = 8192;
3649 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3650 } else if (skyhawk_chip(adapter)) {
3651 adapter->roce_db.size = 4096;
3652 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3653 db_bar(adapter));
3654 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3655 db_bar(adapter));
3656 }
3657 return 0;
3658 }
3659
3660 static int be_map_pci_bars(struct be_adapter *adapter)
3661 {
3662 u8 __iomem *addr;
3663 u32 sli_intf;
3664
3665 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3666 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3667 SLI_INTF_IF_TYPE_SHIFT;
3668
3669 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3670 if (addr == NULL)
3671 goto pci_map_err;
3672 adapter->db = addr;
3673
3674 be_roce_map_pci_bars(adapter);
3675 return 0;
3676
3677 pci_map_err:
3678 be_unmap_pci_bars(adapter);
3679 return -ENOMEM;
3680 }
3681
3682 static void be_ctrl_cleanup(struct be_adapter *adapter)
3683 {
3684 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3685
3686 be_unmap_pci_bars(adapter);
3687
3688 if (mem->va)
3689 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3690 mem->dma);
3691
3692 mem = &adapter->rx_filter;
3693 if (mem->va)
3694 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3695 mem->dma);
3696 }
3697
3698 static int be_ctrl_init(struct be_adapter *adapter)
3699 {
3700 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3701 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3702 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3703 u32 sli_intf;
3704 int status;
3705
3706 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3707 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3708 SLI_INTF_FAMILY_SHIFT;
3709 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3710
3711 status = be_map_pci_bars(adapter);
3712 if (status)
3713 goto done;
3714
3715 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3716 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3717 mbox_mem_alloc->size,
3718 &mbox_mem_alloc->dma,
3719 GFP_KERNEL);
3720 if (!mbox_mem_alloc->va) {
3721 status = -ENOMEM;
3722 goto unmap_pci_bars;
3723 }
3724 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3725 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3726 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3727 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3728
3729 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3730 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3731 &rx_filter->dma, GFP_KERNEL);
3732 if (rx_filter->va == NULL) {
3733 status = -ENOMEM;
3734 goto free_mbox;
3735 }
3736 memset(rx_filter->va, 0, rx_filter->size);
3737 mutex_init(&adapter->mbox_lock);
3738 spin_lock_init(&adapter->mcc_lock);
3739 spin_lock_init(&adapter->mcc_cq_lock);
3740
3741 init_completion(&adapter->flash_compl);
3742 pci_save_state(adapter->pdev);
3743 return 0;
3744
3745 free_mbox:
3746 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3747 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3748
3749 unmap_pci_bars:
3750 be_unmap_pci_bars(adapter);
3751
3752 done:
3753 return status;
3754 }
3755
3756 static void be_stats_cleanup(struct be_adapter *adapter)
3757 {
3758 struct be_dma_mem *cmd = &adapter->stats_cmd;
3759
3760 if (cmd->va)
3761 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3762 cmd->va, cmd->dma);
3763 }
3764
3765 static int be_stats_init(struct be_adapter *adapter)
3766 {
3767 struct be_dma_mem *cmd = &adapter->stats_cmd;
3768
3769 if (lancer_chip(adapter))
3770 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3771 else if (BE2_chip(adapter))
3772 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3773 else
3774 /* BE3 and Skyhawk */
3775 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3776
3777 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3778 GFP_KERNEL);
3779 if (cmd->va == NULL)
3780 return -1;
3781 memset(cmd->va, 0, cmd->size);
3782 return 0;
3783 }
3784
3785 static void __devexit be_remove(struct pci_dev *pdev)
3786 {
3787 struct be_adapter *adapter = pci_get_drvdata(pdev);
3788
3789 if (!adapter)
3790 return;
3791
3792 be_roce_dev_remove(adapter);
3793
3794 cancel_delayed_work_sync(&adapter->func_recovery_work);
3795
3796 unregister_netdev(adapter->netdev);
3797
3798 be_clear(adapter);
3799
3800 /* tell fw we're done with firing cmds */
3801 be_cmd_fw_clean(adapter);
3802
3803 be_stats_cleanup(adapter);
3804
3805 be_ctrl_cleanup(adapter);
3806
3807 pci_disable_pcie_error_reporting(pdev);
3808
3809 pci_set_drvdata(pdev, NULL);
3810 pci_release_regions(pdev);
3811 pci_disable_device(pdev);
3812
3813 free_netdev(adapter->netdev);
3814 }
3815
3816 bool be_is_wol_supported(struct be_adapter *adapter)
3817 {
3818 return ((adapter->wol_cap & BE_WOL_CAP) &&
3819 !be_is_wol_excluded(adapter)) ? true : false;
3820 }
3821
3822 u32 be_get_fw_log_level(struct be_adapter *adapter)
3823 {
3824 struct be_dma_mem extfat_cmd;
3825 struct be_fat_conf_params *cfgs;
3826 int status;
3827 u32 level = 0;
3828 int j;
3829
3830 if (lancer_chip(adapter))
3831 return 0;
3832
3833 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3834 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3835 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3836 &extfat_cmd.dma);
3837
3838 if (!extfat_cmd.va) {
3839 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3840 __func__);
3841 goto err;
3842 }
3843
3844 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3845 if (!status) {
3846 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3847 sizeof(struct be_cmd_resp_hdr));
3848 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3849 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3850 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3851 }
3852 }
3853 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3854 extfat_cmd.dma);
3855 err:
3856 return level;
3857 }
3858
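/* One-time configuration read at probe time: controller attributes, WoL
 * capability, die-temperature polling frequency and firmware log level.
 */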
3859 static int be_get_initial_config(struct be_adapter *adapter)
3860 {
3861 int status;
3862 u32 level;
3863
3864 status = be_cmd_get_cntl_attributes(adapter);
3865 if (status)
3866 return status;
3867
3868 status = be_cmd_get_acpi_wol_cap(adapter);
3869 if (status) {
3870 		/* in case of a failure to get wol capabilities
3871 * check the exclusion list to determine WOL capability */
3872 if (!be_is_wol_excluded(adapter))
3873 adapter->wol_cap |= BE_WOL_CAP;
3874 }
3875
3876 if (be_is_wol_supported(adapter))
3877 adapter->wol = true;
3878
3879 /* Must be a power of 2 or else MODULO will BUG_ON */
3880 adapter->be_get_temp_freq = 64;
3881
3882 level = be_get_fw_log_level(adapter);
3883 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3884
3885 return 0;
3886 }
3887
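/* Recover a Lancer function after a SLIPORT error: wait for the ready state,
 * tear down and re-create the rings, then re-open the interface if it was up.
 */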
3888 static int lancer_recover_func(struct be_adapter *adapter)
3889 {
3890 int status;
3891
3892 status = lancer_test_and_set_rdy_state(adapter);
3893 if (status)
3894 goto err;
3895
3896 if (netif_running(adapter->netdev))
3897 be_close(adapter->netdev);
3898
3899 be_clear(adapter);
3900
3901 adapter->hw_error = false;
3902 adapter->fw_timeout = false;
3903
3904 status = be_setup(adapter);
3905 if (status)
3906 goto err;
3907
3908 if (netif_running(adapter->netdev)) {
3909 status = be_open(adapter->netdev);
3910 if (status)
3911 goto err;
3912 }
3913
3914 	dev_info(&adapter->pdev->dev,
3915 "Adapter SLIPORT recovery succeeded\n");
3916 return 0;
3917 err:
3918 if (adapter->eeh_error)
3919 dev_err(&adapter->pdev->dev,
3920 "Adapter SLIPORT recovery failed\n");
3921
3922 return status;
3923 }
3924
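/* Worker scheduled every second to poll for hardware errors; on Lancer it
 * detaches the netdev and attempts SLIPORT recovery.
 */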
3925 static void be_func_recovery_task(struct work_struct *work)
3926 {
3927 struct be_adapter *adapter =
3928 container_of(work, struct be_adapter, func_recovery_work.work);
3929 int status;
3930
3931 be_detect_error(adapter);
3932
3933 if (adapter->hw_error && lancer_chip(adapter)) {
3934
3935 if (adapter->eeh_error)
3936 goto out;
3937
3938 rtnl_lock();
3939 netif_device_detach(adapter->netdev);
3940 rtnl_unlock();
3941
3942 status = lancer_recover_func(adapter);
3943
3944 if (!status)
3945 netif_device_attach(adapter->netdev);
3946 }
3947
3948 out:
3949 schedule_delayed_work(&adapter->func_recovery_work,
3950 msecs_to_jiffies(1000));
3951 }
3952
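/* Periodic (1s) housekeeping: reap MCC completions, refresh stats and die
 * temperature, replenish starved RX queues and update EQ interrupt delays.
 */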
3953 static void be_worker(struct work_struct *work)
3954 {
3955 struct be_adapter *adapter =
3956 container_of(work, struct be_adapter, work.work);
3957 struct be_rx_obj *rxo;
3958 struct be_eq_obj *eqo;
3959 int i;
3960
3961 /* when interrupts are not yet enabled, just reap any pending
3962 * mcc completions */
3963 if (!netif_running(adapter->netdev)) {
3964 local_bh_disable();
3965 be_process_mcc(adapter);
3966 local_bh_enable();
3967 goto reschedule;
3968 }
3969
3970 if (!adapter->stats_cmd_sent) {
3971 if (lancer_chip(adapter))
3972 lancer_cmd_get_pport_stats(adapter,
3973 &adapter->stats_cmd);
3974 else
3975 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3976 }
3977
3978 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3979 be_cmd_get_die_temperature(adapter);
3980
3981 for_all_rx_queues(adapter, rxo, i) {
3982 if (rxo->rx_post_starved) {
3983 rxo->rx_post_starved = false;
3984 be_post_rx_frags(rxo, GFP_KERNEL);
3985 }
3986 }
3987
3988 for_all_evt_queues(adapter, eqo, i)
3989 be_eqd_update(adapter, eqo);
3990
3991 reschedule:
3992 adapter->work_counter++;
3993 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3994 }
3995
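/* Skip the function reset at probe when VFs are already enabled. */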
3996 static bool be_reset_required(struct be_adapter *adapter)
3997 {
3998 	return be_find_vfs(adapter, ENABLED) <= 0;
3999 }
4000
4001 static char *mc_name(struct be_adapter *adapter)
4002 {
4003 if (adapter->function_mode & FLEX10_MODE)
4004 return "FLEX10";
4005 else if (adapter->function_mode & VNIC_MODE)
4006 return "vNIC";
4007 else if (adapter->function_mode & UMC_ENABLED)
4008 return "UMC";
4009 else
4010 return "";
4011 }
4012
4013 static inline char *func_name(struct be_adapter *adapter)
4014 {
4015 return be_physfn(adapter) ? "PF" : "VF";
4016 }
4017
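/* PCI probe: map BARs, bring up the control path, sync with firmware,
 * configure the function and register the netdev.
 */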
4018 static int __devinit be_probe(struct pci_dev *pdev,
4019 const struct pci_device_id *pdev_id)
4020 {
4021 int status = 0;
4022 struct be_adapter *adapter;
4023 struct net_device *netdev;
4024 char port_name;
4025
4026 status = pci_enable_device(pdev);
4027 if (status)
4028 goto do_none;
4029
4030 status = pci_request_regions(pdev, DRV_NAME);
4031 if (status)
4032 goto disable_dev;
4033 pci_set_master(pdev);
4034
4035 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4036 if (netdev == NULL) {
4037 status = -ENOMEM;
4038 goto rel_reg;
4039 }
4040 adapter = netdev_priv(netdev);
4041 adapter->pdev = pdev;
4042 pci_set_drvdata(pdev, adapter);
4043 adapter->netdev = netdev;
4044 SET_NETDEV_DEV(netdev, &pdev->dev);
4045
4046 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4047 if (!status) {
4048 netdev->features |= NETIF_F_HIGHDMA;
4049 } else {
4050 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4051 if (status) {
4052 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4053 goto free_netdev;
4054 }
4055 }
4056
4057 status = pci_enable_pcie_error_reporting(pdev);
4058 if (status)
4059 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4060
4061 status = be_ctrl_init(adapter);
4062 if (status)
4063 goto free_netdev;
4064
4065 /* sync up with fw's ready state */
4066 if (be_physfn(adapter)) {
4067 status = be_fw_wait_ready(adapter);
4068 if (status)
4069 goto ctrl_clean;
4070 }
4071
4072 /* tell fw we're ready to fire cmds */
4073 status = be_cmd_fw_init(adapter);
4074 if (status)
4075 goto ctrl_clean;
4076
4077 if (be_reset_required(adapter)) {
4078 status = be_cmd_reset_function(adapter);
4079 if (status)
4080 goto ctrl_clean;
4081 }
4082
4083 /* The INTR bit may be set in the card when probed by a kdump kernel
4084 * after a crash.
4085 */
4086 if (!lancer_chip(adapter))
4087 be_intr_set(adapter, false);
4088
4089 status = be_stats_init(adapter);
4090 if (status)
4091 goto ctrl_clean;
4092
4093 status = be_get_initial_config(adapter);
4094 if (status)
4095 goto stats_clean;
4096
4097 INIT_DELAYED_WORK(&adapter->work, be_worker);
4098 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4099 adapter->rx_fc = adapter->tx_fc = true;
4100
4101 status = be_setup(adapter);
4102 if (status)
4103 goto stats_clean;
4104
4105 be_netdev_init(netdev);
4106 status = register_netdev(netdev);
4107 if (status != 0)
4108 goto unsetup;
4109
4110 be_roce_dev_add(adapter);
4111
4112 schedule_delayed_work(&adapter->func_recovery_work,
4113 msecs_to_jiffies(1000));
4114
4115 be_cmd_query_port_name(adapter, &port_name);
4116
4117 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4118 func_name(adapter), mc_name(adapter), port_name);
4119
4120 return 0;
4121
4122 unsetup:
4123 be_clear(adapter);
4124 stats_clean:
4125 be_stats_cleanup(adapter);
4126 ctrl_clean:
4127 be_ctrl_cleanup(adapter);
4128 free_netdev:
4129 free_netdev(netdev);
4130 pci_set_drvdata(pdev, NULL);
4131 rel_reg:
4132 pci_release_regions(pdev);
4133 disable_dev:
4134 pci_disable_device(pdev);
4135 do_none:
4136 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4137 return status;
4138 }
4139
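/* Suspend: optionally arm WoL, stop the recovery worker, close the interface,
 * tear down the function and enter the requested power state.
 */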
4140 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4141 {
4142 struct be_adapter *adapter = pci_get_drvdata(pdev);
4143 struct net_device *netdev = adapter->netdev;
4144
4145 if (adapter->wol)
4146 be_setup_wol(adapter, true);
4147
4148 cancel_delayed_work_sync(&adapter->func_recovery_work);
4149
4150 netif_device_detach(netdev);
4151 if (netif_running(netdev)) {
4152 rtnl_lock();
4153 be_close(netdev);
4154 rtnl_unlock();
4155 }
4156 be_clear(adapter);
4157
4158 pci_save_state(pdev);
4159 pci_disable_device(pdev);
4160 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4161 return 0;
4162 }
4163
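/* Resume: re-enable the device, re-init firmware command support, rebuild the
 * function, re-open the interface and restart the recovery worker.
 */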
4164 static int be_resume(struct pci_dev *pdev)
4165 {
4166 int status = 0;
4167 struct be_adapter *adapter = pci_get_drvdata(pdev);
4168 struct net_device *netdev = adapter->netdev;
4169
4170 netif_device_detach(netdev);
4171
4172 status = pci_enable_device(pdev);
4173 if (status)
4174 return status;
4175
4176 pci_set_power_state(pdev, 0);
4177 pci_restore_state(pdev);
4178
4179 /* tell fw we're ready to fire cmds */
4180 status = be_cmd_fw_init(adapter);
4181 if (status)
4182 return status;
4183
4184 be_setup(adapter);
4185 if (netif_running(netdev)) {
4186 rtnl_lock();
4187 be_open(netdev);
4188 rtnl_unlock();
4189 }
4190
4191 schedule_delayed_work(&adapter->func_recovery_work,
4192 msecs_to_jiffies(1000));
4193 netif_device_attach(netdev);
4194
4195 if (adapter->wol)
4196 be_setup_wol(adapter, false);
4197
4198 return 0;
4199 }
4200
4201 /*
4202 * An FLR will stop BE from DMAing any data.
4203 */
4204 static void be_shutdown(struct pci_dev *pdev)
4205 {
4206 struct be_adapter *adapter = pci_get_drvdata(pdev);
4207
4208 if (!adapter)
4209 return;
4210
4211 cancel_delayed_work_sync(&adapter->work);
4212 cancel_delayed_work_sync(&adapter->func_recovery_work);
4213
4214 netif_device_detach(adapter->netdev);
4215
4216 be_cmd_reset_function(adapter);
4217
4218 pci_disable_device(pdev);
4219 }
4220
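/* EEH: a PCI channel error was detected; detach the netdev, tear down the
 * function and request a slot reset (or disconnect on permanent failure).
 */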
4221 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4222 pci_channel_state_t state)
4223 {
4224 struct be_adapter *adapter = pci_get_drvdata(pdev);
4225 struct net_device *netdev = adapter->netdev;
4226
4227 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4228
4229 adapter->eeh_error = true;
4230
4231 cancel_delayed_work_sync(&adapter->func_recovery_work);
4232
4233 rtnl_lock();
4234 netif_device_detach(netdev);
4235 rtnl_unlock();
4236
4237 if (netif_running(netdev)) {
4238 rtnl_lock();
4239 be_close(netdev);
4240 rtnl_unlock();
4241 }
4242 be_clear(adapter);
4243
4244 if (state == pci_channel_io_perm_failure)
4245 return PCI_ERS_RESULT_DISCONNECT;
4246
4247 pci_disable_device(pdev);
4248
4249 /* The error could cause the FW to trigger a flash debug dump.
4250 * Resetting the card while flash dump is in progress
4251 * can cause it not to recover; wait for it to finish.
4252 * Wait only for first function as it is needed only once per
4253 * adapter.
4254 */
4255 if (pdev->devfn == 0)
4256 ssleep(30);
4257
4258 return PCI_ERS_RESULT_NEED_RESET;
4259 }
4260
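/* EEH slot reset: re-enable the device, restore PCI state and wait for
 * firmware to become ready before reporting recovery.
 */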
4261 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4262 {
4263 struct be_adapter *adapter = pci_get_drvdata(pdev);
4264 int status;
4265
4266 dev_info(&adapter->pdev->dev, "EEH reset\n");
4267 be_clear_all_error(adapter);
4268
4269 status = pci_enable_device(pdev);
4270 if (status)
4271 return PCI_ERS_RESULT_DISCONNECT;
4272
4273 pci_set_master(pdev);
4274 pci_set_power_state(pdev, 0);
4275 pci_restore_state(pdev);
4276
4277 /* Check if card is ok and fw is ready */
4278 status = be_fw_wait_ready(adapter);
4279 if (status)
4280 return PCI_ERS_RESULT_DISCONNECT;
4281
4282 pci_cleanup_aer_uncorrect_error_status(pdev);
4283 return PCI_ERS_RESULT_RECOVERED;
4284 }
4285
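/* EEH resume: re-init firmware cmds, reset and re-setup the function,
 * re-open the interface and restart the recovery worker.
 */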
4286 static void be_eeh_resume(struct pci_dev *pdev)
4287 {
4288 int status = 0;
4289 struct be_adapter *adapter = pci_get_drvdata(pdev);
4290 struct net_device *netdev = adapter->netdev;
4291
4292 dev_info(&adapter->pdev->dev, "EEH resume\n");
4293
4294 pci_save_state(pdev);
4295
4296 /* tell fw we're ready to fire cmds */
4297 status = be_cmd_fw_init(adapter);
4298 if (status)
4299 goto err;
4300
4301 status = be_cmd_reset_function(adapter);
4302 if (status)
4303 goto err;
4304
4305 status = be_setup(adapter);
4306 if (status)
4307 goto err;
4308
4309 if (netif_running(netdev)) {
4310 status = be_open(netdev);
4311 if (status)
4312 goto err;
4313 }
4314
4315 schedule_delayed_work(&adapter->func_recovery_work,
4316 msecs_to_jiffies(1000));
4317 netif_device_attach(netdev);
4318 return;
4319 err:
4320 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4321 }
4322
4323 static const struct pci_error_handlers be_eeh_handlers = {
4324 .error_detected = be_eeh_err_detected,
4325 .slot_reset = be_eeh_reset,
4326 .resume = be_eeh_resume,
4327 };
4328
4329 static struct pci_driver be_driver = {
4330 .name = DRV_NAME,
4331 .id_table = be_dev_ids,
4332 .probe = be_probe,
4333 .remove = be_remove,
4334 .suspend = be_suspend,
4335 .resume = be_resume,
4336 .shutdown = be_shutdown,
4337 .err_handler = &be_eeh_handlers
4338 };
4339
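/* Module init: validate the rx_frag_size parameter and register the PCI
 * driver.
 */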
4340 static int __init be_init_module(void)
4341 {
4342 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4343 rx_frag_size != 2048) {
4344 printk(KERN_WARNING DRV_NAME
4345 " : Module param rx_frag_size must be 2048/4096/8192."
4346 " Using 2048\n");
4347 rx_frag_size = 2048;
4348 }
4349
4350 return pci_register_driver(&be_driver);
4351 }
4352 module_init(be_init_module);
4353
4354 static void __exit be_exit_module(void)
4355 {
4356 pci_unregister_driver(&be_driver);
4357 }
4358 module_exit(be_exit_module);