1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static ushort rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, ushort, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { 0 }
46 };
47 MODULE_DEVICE_TABLE(pci, be_dev_ids);
48 /* UE Status Low CSR */
49 static char *ue_status_low_desc[] = {
50 "CEV",
51 "CTX",
52 "DBUF",
53 "ERX",
54 "Host",
55 "MPU",
56 "NDMA",
57 "PTC ",
58 "RDMA ",
59 "RXF ",
60 "RXIPS ",
61 "RXULP0 ",
62 "RXULP1 ",
63 "RXULP2 ",
64 "TIM ",
65 "TPOST ",
66 "TPRE ",
67 "TXIPS ",
68 "TXULP0 ",
69 "TXULP1 ",
70 "UC ",
71 "WDMA ",
72 "TXULP2 ",
73 "HOST1 ",
74 "P0_OB_LINK ",
75 "P1_OB_LINK ",
76 "HOST_GPIO ",
77 "MBOX ",
78 "AXGMAC0",
79 "AXGMAC1",
80 "JTAG",
81 "MPU_INTPEND"
82 };
83 /* UE Status High CSR */
84 static char *ue_status_hi_desc[] = {
85 "LPCMEMHOST",
86 "MGMT_MAC",
87 "PCS0ONLINE",
88 "MPU_IRAM",
89 "PCS1ONLINE",
90 "PCTL0",
91 "PCTL1",
92 "PMEM",
93 "RR",
94 "TXPB",
95 "RXPP",
96 "XAUI",
97 "TXP",
98 "ARM",
99 "IPC",
100 "HOST2",
101 "HOST3",
102 "HOST4",
103 "HOST5",
104 "HOST6",
105 "HOST7",
106 "HOST8",
107 "HOST9",
108 "NETC"
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown"
117 };
118
119 static inline bool be_multi_rxq(struct be_adapter *adapter)
120 {
121 return (adapter->num_rx_qs > 1);
122 }
123
124 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125 {
126 struct be_dma_mem *mem = &q->dma_mem;
127 if (mem->va)
128 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129 mem->dma);
130 }
131
132 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
133 u16 len, u16 entry_size)
134 {
135 struct be_dma_mem *mem = &q->dma_mem;
136
137 memset(q, 0, sizeof(*q));
138 q->len = len;
139 q->entry_size = entry_size;
140 mem->size = len * entry_size;
141 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142 GFP_KERNEL);
143 if (!mem->va)
144 return -1;
145 memset(mem->va, 0, mem->size);
146 return 0;
147 }
148
149 static void be_intr_set(struct be_adapter *adapter, bool enable)
150 {
151 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
152 u32 reg = ioread32(addr);
153 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
154
155 if (adapter->eeh_err)
156 return;
157
158 if (!enabled && enable)
159 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160 else if (enabled && !enable)
161 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162 else
163 return;
164
165 iowrite32(reg, addr);
166 }
167
168 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
169 {
170 u32 val = 0;
171 val |= qid & DB_RQ_RING_ID_MASK;
172 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
173
174 wmb();
175 iowrite32(val, adapter->db + DB_RQ_OFFSET);
176 }
177
178 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179 {
180 u32 val = 0;
181 val |= qid & DB_TXULP_RING_ID_MASK;
182 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
183
184 wmb();
185 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
186 }
187
188 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
189 bool arm, bool clear_int, u16 num_popped)
190 {
191 u32 val = 0;
192 val |= qid & DB_EQ_RING_ID_MASK;
193 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
194 DB_EQ_RING_ID_EXT_MASK_SHIFT);
195
196 if (adapter->eeh_err)
197 return;
198
199 if (arm)
200 val |= 1 << DB_EQ_REARM_SHIFT;
201 if (clear_int)
202 val |= 1 << DB_EQ_CLR_SHIFT;
203 val |= 1 << DB_EQ_EVNT_SHIFT;
204 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
205 iowrite32(val, adapter->db + DB_EQ_OFFSET);
206 }
207
208 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
209 {
210 u32 val = 0;
211 val |= qid & DB_CQ_RING_ID_MASK;
212 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
213 DB_CQ_RING_ID_EXT_MASK_SHIFT);
214
215 if (adapter->eeh_err)
216 return;
217
218 if (arm)
219 val |= 1 << DB_CQ_REARM_SHIFT;
220 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
221 iowrite32(val, adapter->db + DB_CQ_OFFSET);
222 }
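
/* Illustrative sketch (not part of the driver): how a completion-queue
 * doorbell value is composed. Re-arming CQ id 5 after popping 3
 * completions (ignoring the ring-id extension bits for a small qid)
 * builds:
 *
 * u32 val = 0;
 * val |= 5 & DB_CQ_RING_ID_MASK;
 * val |= 1 << DB_CQ_REARM_SHIFT;
 * val |= 3 << DB_CQ_NUM_POPPED_SHIFT;
 * iowrite32(val, adapter->db + DB_CQ_OFFSET);
 *
 * i.e. the ring id, re-arm flag and popped-entry count are packed into
 * a single 32-bit write to the doorbell BAR.
 */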
223
224 static int be_mac_addr_set(struct net_device *netdev, void *p)
225 {
226 struct be_adapter *adapter = netdev_priv(netdev);
227 struct sockaddr *addr = p;
228 int status = 0;
229
230 if (!is_valid_ether_addr(addr->sa_data))
231 return -EADDRNOTAVAIL;
232
233 /* MAC addr configuration will be done in hardware for VFs
234 * by their corresponding PFs. Just copy to netdev addr here
235 */
236 if (!be_physfn(adapter))
237 goto netdev_addr;
238
239 status = be_cmd_pmac_del(adapter, adapter->if_handle,
240 adapter->pmac_id, 0);
241 if (status)
242 return status;
243
244 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
245 adapter->if_handle, &adapter->pmac_id, 0);
246 netdev_addr:
247 if (!status)
248 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
249
250 return status;
251 }
252
253 void netdev_stats_update(struct be_adapter *adapter)
254 {
255 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
256 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
257 struct be_port_rxf_stats *port_stats =
258 &rxf_stats->port[adapter->port_num];
259 struct net_device_stats *dev_stats = &adapter->netdev->stats;
260 struct be_erx_stats *erx_stats = &hw_stats->erx;
261 struct be_rx_obj *rxo;
262 int i;
263
264 memset(dev_stats, 0, sizeof(*dev_stats));
265 for_all_rx_queues(adapter, rxo, i) {
266 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
267 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
268 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
269 /* no space in linux buffers: best possible approximation */
270 dev_stats->rx_dropped +=
271 erx_stats->rx_drops_no_fragments[rxo->q.id];
272 }
273
274 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
275 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
276
277 /* bad pkts received */
278 dev_stats->rx_errors = port_stats->rx_crc_errors +
279 port_stats->rx_alignment_symbol_errors +
280 port_stats->rx_in_range_errors +
281 port_stats->rx_out_range_errors +
282 port_stats->rx_frame_too_long +
283 port_stats->rx_dropped_too_small +
284 port_stats->rx_dropped_too_short +
285 port_stats->rx_dropped_header_too_small +
286 port_stats->rx_dropped_tcp_length +
287 port_stats->rx_dropped_runt +
288 port_stats->rx_tcp_checksum_errs +
289 port_stats->rx_ip_checksum_errs +
290 port_stats->rx_udp_checksum_errs;
291
292 /* detailed rx errors */
293 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
294 port_stats->rx_out_range_errors +
295 port_stats->rx_frame_too_long;
296
297 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
298
299 /* frame alignment errors */
300 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
301
302 /* receiver fifo overrun */
303 /* drops_no_pbuf is not per i/f; it's per BE card */
304 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
305 port_stats->rx_input_fifo_overflow +
306 rxf_stats->rx_drops_no_pbuf;
307 }
308
309 void be_link_status_update(struct be_adapter *adapter, bool link_up)
310 {
311 struct net_device *netdev = adapter->netdev;
312
313 /* If link came up or went down */
314 if (adapter->link_up != link_up) {
315 adapter->link_speed = -1;
316 if (link_up) {
317 netif_carrier_on(netdev);
318 printk(KERN_INFO "%s: Link up\n", netdev->name);
319 } else {
320 netif_carrier_off(netdev);
321 printk(KERN_INFO "%s: Link down\n", netdev->name);
322 }
323 adapter->link_up = link_up;
324 }
325 }
326
327 /* Update the EQ delay in BE based on the RX frags consumed per sec */
328 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
329 {
330 struct be_eq_obj *rx_eq = &rxo->rx_eq;
331 struct be_rx_stats *stats = &rxo->stats;
332 ulong now = jiffies;
333 u32 eqd;
334
335 if (!rx_eq->enable_aic)
336 return;
337
338 /* Wrapped around */
339 if (time_before(now, stats->rx_fps_jiffies)) {
340 stats->rx_fps_jiffies = now;
341 return;
342 }
343
344 /* Update once a second */
345 if ((now - stats->rx_fps_jiffies) < HZ)
346 return;
347
348 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
349 ((now - stats->rx_fps_jiffies) / HZ);
350
351 stats->rx_fps_jiffies = now;
352 stats->prev_rx_frags = stats->rx_frags;
353 eqd = stats->rx_fps / 110000;
354 eqd = eqd << 3;
355 if (eqd > rx_eq->max_eqd)
356 eqd = rx_eq->max_eqd;
357 if (eqd < rx_eq->min_eqd)
358 eqd = rx_eq->min_eqd;
359 if (eqd < 10)
360 eqd = 0;
361 if (eqd != rx_eq->cur_eqd)
362 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
363
364 rx_eq->cur_eqd = eqd;
365 }
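
/* Worked example of the heuristic above (illustrative only): at
 * 1,760,000 rx frags/sec, eqd = (1760000 / 110000) << 3 = 128, which is
 * then clamped to [min_eqd, max_eqd]; any result below 10 is forced to
 * 0 so a lightly loaded queue keeps interrupting immediately.
 */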
366
367 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
368 {
369 u64 rate = bytes;
370
371 do_div(rate, ticks / HZ);
372 rate <<= 3; /* bytes/sec -> bits/sec */
373 do_div(rate, 1000000ul); /* bits/sec -> Mbits/sec */
374
375 return rate;
376 }
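
/* Worked example (illustrative only): 250,000,000 bytes observed over
 * 2 seconds worth of ticks gives
 * rate = 250000000 / 2 = 125000000 (bytes/sec)
 * rate <<= 3 -> 1000000000 (bits/sec)
 * do_div(rate, 1000000) -> 1000 (Mbits/sec)
 */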
377
378 static void be_tx_rate_update(struct be_adapter *adapter)
379 {
380 struct be_tx_stats *stats = tx_stats(adapter);
381 ulong now = jiffies;
382
383 /* Wrapped around? */
384 if (time_before(now, stats->be_tx_jiffies)) {
385 stats->be_tx_jiffies = now;
386 return;
387 }
388
389 /* Update tx rate once in two seconds */
390 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
391 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
392 - stats->be_tx_bytes_prev,
393 now - stats->be_tx_jiffies);
394 stats->be_tx_jiffies = now;
395 stats->be_tx_bytes_prev = stats->be_tx_bytes;
396 }
397 }
398
399 static void be_tx_stats_update(struct be_adapter *adapter,
400 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
401 {
402 struct be_tx_stats *stats = tx_stats(adapter);
403 stats->be_tx_reqs++;
404 stats->be_tx_wrbs += wrb_cnt;
405 stats->be_tx_bytes += copied;
406 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
407 if (stopped)
408 stats->be_tx_stops++;
409 }
410
411 /* Determine number of WRB entries needed to xmit data in an skb */
412 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
413 bool *dummy)
414 {
415 int cnt = (skb->len > skb->data_len);
416
417 cnt += skb_shinfo(skb)->nr_frags;
418
419 /* to account for hdr wrb */
420 cnt++;
421 if (lancer_chip(adapter) || !(cnt & 1)) {
422 *dummy = false;
423 } else {
424 /* add a dummy to make it an even num */
425 cnt++;
426 *dummy = true;
427 }
428 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
429 return cnt;
430 }
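
/* Worked example (illustrative only): an skb with a linear header and
 * 3 page frags needs 1 + 3 = 4 data WRBs plus 1 header WRB = 5; since
 * 5 is odd (and assuming a non-Lancer chip), a dummy WRB pads the
 * count to an even 6.
 */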
431
432 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
433 {
434 wrb->frag_pa_hi = upper_32_bits(addr);
435 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
436 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
437 }
438
439 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
440 struct sk_buff *skb, u32 wrb_cnt, u32 len)
441 {
442 u8 vlan_prio = 0;
443 u16 vlan_tag = 0;
444
445 memset(hdr, 0, sizeof(*hdr));
446
447 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
448
449 if (skb_is_gso(skb)) {
450 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
451 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
452 hdr, skb_shinfo(skb)->gso_size);
453 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
454 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
455 if (lancer_chip(adapter) && adapter->sli_family ==
456 LANCER_A0_SLI_FAMILY) {
457 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
458 if (is_tcp_pkt(skb))
459 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
460 tcpcs, hdr, 1);
461 else if (is_udp_pkt(skb))
462 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
463 udpcs, hdr, 1);
464 }
465 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
466 if (is_tcp_pkt(skb))
467 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
468 else if (is_udp_pkt(skb))
469 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
470 }
471
472 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
473 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
474 vlan_tag = vlan_tx_tag_get(skb);
475 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
476 /* If vlan priority provided by OS is NOT in available bmap */
477 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
478 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
479 adapter->recommended_prio;
480 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
481 }
482
483 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
484 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
485 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
486 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
487 }
488
489 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
490 bool unmap_single)
491 {
492 dma_addr_t dma;
493
494 be_dws_le_to_cpu(wrb, sizeof(*wrb));
495
496 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
497 if (wrb->frag_len) {
498 if (unmap_single)
499 dma_unmap_single(dev, dma, wrb->frag_len,
500 DMA_TO_DEVICE);
501 else
502 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
503 }
504 }
505
506 static int make_tx_wrbs(struct be_adapter *adapter,
507 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
508 {
509 dma_addr_t busaddr;
510 int i, copied = 0;
511 struct device *dev = &adapter->pdev->dev;
512 struct sk_buff *first_skb = skb;
513 struct be_queue_info *txq = &adapter->tx_obj.q;
514 struct be_eth_wrb *wrb;
515 struct be_eth_hdr_wrb *hdr;
516 bool map_single = false;
517 u16 map_head;
518
519 hdr = queue_head_node(txq);
520 queue_head_inc(txq);
521 map_head = txq->head;
522
523 if (skb->len > skb->data_len) {
524 int len = skb_headlen(skb);
525 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
526 if (dma_mapping_error(dev, busaddr))
527 goto dma_err;
528 map_single = true;
529 wrb = queue_head_node(txq);
530 wrb_fill(wrb, busaddr, len);
531 be_dws_cpu_to_le(wrb, sizeof(*wrb));
532 queue_head_inc(txq);
533 copied += len;
534 }
535
536 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
537 struct skb_frag_struct *frag =
538 &skb_shinfo(skb)->frags[i];
539 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
540 frag->size, DMA_TO_DEVICE);
541 if (dma_mapping_error(dev, busaddr))
542 goto dma_err;
543 wrb = queue_head_node(txq);
544 wrb_fill(wrb, busaddr, frag->size);
545 be_dws_cpu_to_le(wrb, sizeof(*wrb));
546 queue_head_inc(txq);
547 copied += frag->size;
548 }
549
550 if (dummy_wrb) {
551 wrb = queue_head_node(txq);
552 wrb_fill(wrb, 0, 0);
553 be_dws_cpu_to_le(wrb, sizeof(*wrb));
554 queue_head_inc(txq);
555 }
556
557 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
558 be_dws_cpu_to_le(hdr, sizeof(*hdr));
559
560 return copied;
561 dma_err:
562 txq->head = map_head;
563 while (copied) {
564 wrb = queue_head_node(txq);
565 unmap_tx_frag(dev, wrb, map_single);
566 map_single = false;
567 copied -= wrb->frag_len;
568 queue_head_inc(txq);
569 }
570 return 0;
571 }
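
/* Illustrative layout of the WRBs posted above for one skb (not part
 * of the driver):
 *
 * [hdr wrb][head-frag wrb][page-frag wrb 0]...[page-frag wrb N][dummy?]
 *
 * The header WRB slot is reserved first but filled last (via
 * wrb_fill_hdr) because it must record the final wrb count and the
 * total copied length; the optional dummy WRB only pads the count to
 * an even number on non-Lancer chips.
 */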
572
573 static netdev_tx_t be_xmit(struct sk_buff *skb,
574 struct net_device *netdev)
575 {
576 struct be_adapter *adapter = netdev_priv(netdev);
577 struct be_tx_obj *tx_obj = &adapter->tx_obj;
578 struct be_queue_info *txq = &tx_obj->q;
579 u32 wrb_cnt = 0, copied = 0;
580 u32 start = txq->head;
581 bool dummy_wrb, stopped = false;
582
583 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
584
585 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
586 if (copied) {
587 /* record the sent skb in the sent_skb table */
588 BUG_ON(tx_obj->sent_skb_list[start]);
589 tx_obj->sent_skb_list[start] = skb;
590
591 /* Ensure txq has space for the next skb; else stop the queue
592 * *BEFORE* ringing the tx doorbell, so that we serialize the
593 * tx compls of the current transmit, which will wake up the queue
594 */
595 atomic_add(wrb_cnt, &txq->used);
596 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
597 txq->len) {
598 netif_stop_queue(netdev);
599 stopped = true;
600 }
601
602 be_txq_notify(adapter, txq->id, wrb_cnt);
603
604 be_tx_stats_update(adapter, wrb_cnt, copied,
605 skb_shinfo(skb)->gso_segs, stopped);
606 } else {
607 txq->head = start;
608 dev_kfree_skb_any(skb);
609 }
610 return NETDEV_TX_OK;
611 }
612
613 static int be_change_mtu(struct net_device *netdev, int new_mtu)
614 {
615 struct be_adapter *adapter = netdev_priv(netdev);
616 if (new_mtu < BE_MIN_MTU ||
617 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
618 (ETH_HLEN + ETH_FCS_LEN))) {
619 dev_info(&adapter->pdev->dev,
620 "MTU must be between %d and %d bytes\n",
621 BE_MIN_MTU,
622 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
623 return -EINVAL;
624 }
625 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
626 netdev->mtu, new_mtu);
627 netdev->mtu = new_mtu;
628 return 0;
629 }
630
631 /*
632 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
633 * If the user configures more, place BE in vlan promiscuous mode.
634 */
635 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
636 {
637 u16 vtag[BE_NUM_VLANS_SUPPORTED];
638 u16 ntags = 0, i;
639 int status = 0;
640 u32 if_handle;
641
642 if (vf) {
643 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
644 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
645 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
646 }
647
648 if (adapter->vlans_added <= adapter->max_vlans) {
649 /* Construct VLAN Table to give to HW */
650 for (i = 0; i < VLAN_N_VID; i++) {
651 if (adapter->vlan_tag[i]) {
652 vtag[ntags] = cpu_to_le16(i);
653 ntags++;
654 }
655 }
656 status = be_cmd_vlan_config(adapter, adapter->if_handle,
657 vtag, ntags, 1, 0);
658 } else {
659 status = be_cmd_vlan_config(adapter, adapter->if_handle,
660 NULL, 0, 1, 1);
661 }
662
663 return status;
664 }
665
666 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
667 {
668 struct be_adapter *adapter = netdev_priv(netdev);
669
670 adapter->vlan_grp = grp;
671 }
672
673 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
674 {
675 struct be_adapter *adapter = netdev_priv(netdev);
676
677 adapter->vlans_added++;
678 if (!be_physfn(adapter))
679 return;
680
681 adapter->vlan_tag[vid] = 1;
682 if (adapter->vlans_added <= (adapter->max_vlans + 1))
683 be_vid_config(adapter, false, 0);
684 }
685
686 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
687 {
688 struct be_adapter *adapter = netdev_priv(netdev);
689
690 adapter->vlans_added--;
691 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
692
693 if (!be_physfn(adapter))
694 return;
695
696 adapter->vlan_tag[vid] = 0;
697 if (adapter->vlans_added <= adapter->max_vlans)
698 be_vid_config(adapter, false, 0);
699 }
700
701 static void be_set_multicast_list(struct net_device *netdev)
702 {
703 struct be_adapter *adapter = netdev_priv(netdev);
704
705 if (netdev->flags & IFF_PROMISC) {
706 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
707 adapter->promiscuous = true;
708 goto done;
709 }
710
711 /* BE was previously in promiscuous mode; disable it */
712 if (adapter->promiscuous) {
713 adapter->promiscuous = false;
714 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
715 }
716
717 /* Enable multicast promisc if num configured exceeds what we support */
718 if (netdev->flags & IFF_ALLMULTI ||
719 netdev_mc_count(netdev) > BE_MAX_MC) {
720 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
721 &adapter->mc_cmd_mem);
722 goto done;
723 }
724
725 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
726 &adapter->mc_cmd_mem);
727 done:
728 return;
729 }
730
731 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
732 {
733 struct be_adapter *adapter = netdev_priv(netdev);
734 int status;
735
736 if (!adapter->sriov_enabled)
737 return -EPERM;
738
739 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
740 return -EINVAL;
741
742 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
743 status = be_cmd_pmac_del(adapter,
744 adapter->vf_cfg[vf].vf_if_handle,
745 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
746
747 status = be_cmd_pmac_add(adapter, mac,
748 adapter->vf_cfg[vf].vf_if_handle,
749 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
750
751 if (status)
752 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
753 mac, vf);
754 else
755 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
756
757 return status;
758 }
759
760 static int be_get_vf_config(struct net_device *netdev, int vf,
761 struct ifla_vf_info *vi)
762 {
763 struct be_adapter *adapter = netdev_priv(netdev);
764
765 if (!adapter->sriov_enabled)
766 return -EPERM;
767
768 if (vf >= num_vfs)
769 return -EINVAL;
770
771 vi->vf = vf;
772 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
773 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
774 vi->qos = 0;
775 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
776
777 return 0;
778 }
779
780 static int be_set_vf_vlan(struct net_device *netdev,
781 int vf, u16 vlan, u8 qos)
782 {
783 struct be_adapter *adapter = netdev_priv(netdev);
784 int status = 0;
785
786 if (!adapter->sriov_enabled)
787 return -EPERM;
788
789 if ((vf >= num_vfs) || (vlan > 4095))
790 return -EINVAL;
791
792 if (vlan) {
793 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
794 adapter->vlans_added++;
795 } else {
796 adapter->vf_cfg[vf].vf_vlan_tag = 0;
797 adapter->vlans_added--;
798 }
799
800 status = be_vid_config(adapter, true, vf);
801
802 if (status)
803 dev_info(&adapter->pdev->dev,
804 "VLAN %d config on VF %d failed\n", vlan, vf);
805 return status;
806 }
807
808 static int be_set_vf_tx_rate(struct net_device *netdev,
809 int vf, int rate)
810 {
811 struct be_adapter *adapter = netdev_priv(netdev);
812 int status = 0;
813
814 if (!adapter->sriov_enabled)
815 return -EPERM;
816
817 if ((vf >= num_vfs) || (rate < 0))
818 return -EINVAL;
819
820 if (rate > 10000)
821 rate = 10000;
822
823 adapter->vf_cfg[vf].vf_tx_rate = rate;
824 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
825
826 if (status)
827 dev_info(&adapter->pdev->dev,
828 "tx rate %d on VF %d failed\n", rate, vf);
829 return status;
830 }
831
832 static void be_rx_rate_update(struct be_rx_obj *rxo)
833 {
834 struct be_rx_stats *stats = &rxo->stats;
835 ulong now = jiffies;
836
837 /* Wrapped around */
838 if (time_before(now, stats->rx_jiffies)) {
839 stats->rx_jiffies = now;
840 return;
841 }
842
843 /* Update the rate once in two seconds */
844 if ((now - stats->rx_jiffies) < 2 * HZ)
845 return;
846
847 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
848 now - stats->rx_jiffies);
849 stats->rx_jiffies = now;
850 stats->rx_bytes_prev = stats->rx_bytes;
851 }
852
853 static void be_rx_stats_update(struct be_rx_obj *rxo,
854 struct be_rx_compl_info *rxcp)
855 {
856 struct be_rx_stats *stats = &rxo->stats;
857
858 stats->rx_compl++;
859 stats->rx_frags += rxcp->num_rcvd;
860 stats->rx_bytes += rxcp->pkt_size;
861 stats->rx_pkts++;
862 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
863 stats->rx_mcast_pkts++;
864 if (rxcp->err)
865 stats->rxcp_err++;
866 }
867
868 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
869 {
870 /* L4 checksum is not reliable for non-TCP/UDP packets.
871 * Also ignore ipcksm for ipv6 pkts */
872 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
873 (rxcp->ip_csum || rxcp->ipv6);
874 }
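
/* Illustrative truth table for csum_passed() (not part of the driver):
 *
 * tcpf/udpf  l4_csum  ip_csum  ipv6   result
 *     1         1        1      0     pass (IPv4 TCP/UDP, both csums ok)
 *     1         1        0      1     pass (ipcksm ignored for ipv6)
 *     1         0        1      0     fail (bad L4 checksum)
 *     0         -        -      -     fail (non TCP/UDP; unreliable)
 */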
875
876 static struct be_rx_page_info *
877 get_rx_page_info(struct be_adapter *adapter,
878 struct be_rx_obj *rxo,
879 u16 frag_idx)
880 {
881 struct be_rx_page_info *rx_page_info;
882 struct be_queue_info *rxq = &rxo->q;
883
884 rx_page_info = &rxo->page_info_tbl[frag_idx];
885 BUG_ON(!rx_page_info->page);
886
887 if (rx_page_info->last_page_user) {
888 dma_unmap_page(&adapter->pdev->dev,
889 dma_unmap_addr(rx_page_info, bus),
890 adapter->big_page_size, DMA_FROM_DEVICE);
891 rx_page_info->last_page_user = false;
892 }
893
894 atomic_dec(&rxq->used);
895 return rx_page_info;
896 }
897
898 /* Throw away the data in the Rx completion */
899 static void be_rx_compl_discard(struct be_adapter *adapter,
900 struct be_rx_obj *rxo,
901 struct be_rx_compl_info *rxcp)
902 {
903 struct be_queue_info *rxq = &rxo->q;
904 struct be_rx_page_info *page_info;
905 u16 i, num_rcvd = rxcp->num_rcvd;
906
907 for (i = 0; i < num_rcvd; i++) {
908 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
909 put_page(page_info->page);
910 memset(page_info, 0, sizeof(*page_info));
911 index_inc(&rxcp->rxq_idx, rxq->len);
912 }
913 }
914
915 /*
916 * skb_fill_rx_data forms a complete skb for an ether frame
917 * indicated by rxcp.
918 */
919 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
920 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
921 {
922 struct be_queue_info *rxq = &rxo->q;
923 struct be_rx_page_info *page_info;
924 u16 i, j;
925 u16 hdr_len, curr_frag_len, remaining;
926 u8 *start;
927
928 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
929 start = page_address(page_info->page) + page_info->page_offset;
930 prefetch(start);
931
932 /* Copy data in the first descriptor of this completion */
933 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
934
935 /* Copy the header portion into skb_data */
936 hdr_len = min(BE_HDR_LEN, curr_frag_len);
937 memcpy(skb->data, start, hdr_len);
938 skb->len = curr_frag_len;
939 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
940 /* Complete packet has now been moved to data */
941 put_page(page_info->page);
942 skb->data_len = 0;
943 skb->tail += curr_frag_len;
944 } else {
945 skb_shinfo(skb)->nr_frags = 1;
946 skb_shinfo(skb)->frags[0].page = page_info->page;
947 skb_shinfo(skb)->frags[0].page_offset =
948 page_info->page_offset + hdr_len;
949 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
950 skb->data_len = curr_frag_len - hdr_len;
951 skb->tail += hdr_len;
952 }
953 page_info->page = NULL;
954
955 if (rxcp->pkt_size <= rx_frag_size) {
956 BUG_ON(rxcp->num_rcvd != 1);
957 return;
958 }
959
960 /* More frags present for this completion */
961 index_inc(&rxcp->rxq_idx, rxq->len);
962 remaining = rxcp->pkt_size - curr_frag_len;
963 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
964 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
965 curr_frag_len = min(remaining, rx_frag_size);
966
967 /* Coalesce all frags from the same physical page in one slot */
968 if (page_info->page_offset == 0) {
969 /* Fresh page */
970 j++;
971 skb_shinfo(skb)->frags[j].page = page_info->page;
972 skb_shinfo(skb)->frags[j].page_offset =
973 page_info->page_offset;
974 skb_shinfo(skb)->frags[j].size = 0;
975 skb_shinfo(skb)->nr_frags++;
976 } else {
977 put_page(page_info->page);
978 }
979
980 skb_shinfo(skb)->frags[j].size += curr_frag_len;
981 skb->len += curr_frag_len;
982 skb->data_len += curr_frag_len;
983
984 remaining -= curr_frag_len;
985 index_inc(&rxcp->rxq_idx, rxq->len);
986 page_info->page = NULL;
987 }
988 BUG_ON(j > MAX_SKB_FRAGS);
989 }
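
/* Worked example (illustrative only): a 6000-byte frame with
 * rx_frag_size = 2048 arrives as 3 frags. The first
 * min(BE_HDR_LEN, 2048) bytes are copied into skb->data; the rest of
 * frag 0 and frags 1-2 are attached as page fragments, with
 * consecutive frags that share a physical page coalesced into a single
 * skb frag slot.
 */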
990
991 /* Process the RX completion indicated by rxcp when GRO is disabled */
992 static void be_rx_compl_process(struct be_adapter *adapter,
993 struct be_rx_obj *rxo,
994 struct be_rx_compl_info *rxcp)
995 {
996 struct sk_buff *skb;
997
998 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
999 if (unlikely(!skb)) {
1000 if (net_ratelimit())
1001 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1002 be_rx_compl_discard(adapter, rxo, rxcp);
1003 return;
1004 }
1005
1006 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1007
1008 if (likely(adapter->rx_csum && csum_passed(rxcp)))
1009 skb->ip_summed = CHECKSUM_UNNECESSARY;
1010 else
1011 skb_checksum_none_assert(skb);
1012
1013 skb->truesize = skb->len + sizeof(struct sk_buff);
1014 skb->protocol = eth_type_trans(skb, adapter->netdev);
1015
1016 if (unlikely(rxcp->vlanf)) {
1017 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1018 kfree_skb(skb);
1019 return;
1020 }
1021 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
1022 } else {
1023 netif_receive_skb(skb);
1024 }
1025 }
1026
1027 /* Process the RX completion indicated by rxcp when GRO is enabled */
1028 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1029 struct be_rx_obj *rxo,
1030 struct be_rx_compl_info *rxcp)
1031 {
1032 struct be_rx_page_info *page_info;
1033 struct sk_buff *skb = NULL;
1034 struct be_queue_info *rxq = &rxo->q;
1035 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1036 u16 remaining, curr_frag_len;
1037 u16 i, j;
1038
1039 skb = napi_get_frags(&eq_obj->napi);
1040 if (!skb) {
1041 be_rx_compl_discard(adapter, rxo, rxcp);
1042 return;
1043 }
1044
1045 remaining = rxcp->pkt_size;
1046 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1047 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1048
1049 curr_frag_len = min(remaining, rx_frag_size);
1050
1051 /* Coalesce all frags from the same physical page in one slot */
1052 if (i == 0 || page_info->page_offset == 0) {
1053 /* First frag or Fresh page */
1054 j++;
1055 skb_shinfo(skb)->frags[j].page = page_info->page;
1056 skb_shinfo(skb)->frags[j].page_offset =
1057 page_info->page_offset;
1058 skb_shinfo(skb)->frags[j].size = 0;
1059 } else {
1060 put_page(page_info->page);
1061 }
1062 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1063
1064 remaining -= curr_frag_len;
1065 index_inc(&rxcp->rxq_idx, rxq->len);
1066 memset(page_info, 0, sizeof(*page_info));
1067 }
1068 BUG_ON(j > MAX_SKB_FRAGS);
1069
1070 skb_shinfo(skb)->nr_frags = j + 1;
1071 skb->len = rxcp->pkt_size;
1072 skb->data_len = rxcp->pkt_size;
1073 skb->truesize += rxcp->pkt_size;
1074 skb->ip_summed = CHECKSUM_UNNECESSARY;
1075
1076 if (likely(!rxcp->vlanf))
1077 napi_gro_frags(&eq_obj->napi);
1078 else
1079 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
1080 }
1081
1082 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1083 struct be_eth_rx_compl *compl,
1084 struct be_rx_compl_info *rxcp)
1085 {
1086 rxcp->pkt_size =
1087 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1088 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1089 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1090 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1091 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1092 rxcp->ip_csum =
1093 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1094 rxcp->l4_csum =
1095 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1096 rxcp->ipv6 =
1097 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1098 rxcp->rxq_idx =
1099 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1100 rxcp->num_rcvd =
1101 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1102 rxcp->pkt_type =
1103 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1104 if (rxcp->vlanf) {
1105 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1106 compl);
1107 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1108 compl);
1109 }
1110 }
1111
1112 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1113 struct be_eth_rx_compl *compl,
1114 struct be_rx_compl_info *rxcp)
1115 {
1116 rxcp->pkt_size =
1117 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1118 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1119 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1120 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1121 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1122 rxcp->ip_csum =
1123 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1124 rxcp->l4_csum =
1125 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1126 rxcp->ipv6 =
1127 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1128 rxcp->rxq_idx =
1129 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1130 rxcp->num_rcvd =
1131 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1132 rxcp->pkt_type =
1133 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1134 if (rxcp->vlanf) {
1135 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1136 compl);
1137 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1138 compl);
1139 }
1140 }
1141
1142 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1143 {
1144 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1145 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1146 struct be_adapter *adapter = rxo->adapter;
1147
1148 /* For checking the valid bit it is OK to use either definition, as the
1149 * valid bit is at the same position in both v0 and v1 Rx compl */
1150 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1151 return NULL;
1152
1153 rmb();
1154 be_dws_le_to_cpu(compl, sizeof(*compl));
1155
1156 if (adapter->be3_native)
1157 be_parse_rx_compl_v1(adapter, compl, rxcp);
1158 else
1159 be_parse_rx_compl_v0(adapter, compl, rxcp);
1160
1161 if (rxcp->vlanf) {
1162 /* vlanf could be wrongly set in some cards.
1163 * Ignore it if vtm is not set */
1164 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1165 rxcp->vlanf = 0;
1166
1167 if (!lancer_chip(adapter))
1168 rxcp->vid = swab16(rxcp->vid);
1169
1170 if ((adapter->pvid == rxcp->vid) &&
1171 !adapter->vlan_tag[rxcp->vid])
1172 rxcp->vlanf = 0;
1173 }
1174
1175 /* As the compl has been parsed, reset it; we won't touch it again */
1176 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1177
1178 queue_tail_inc(&rxo->cq);
1179 return rxcp;
1180 }
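
/* Note on the pattern above (illustrative only): the 'valid' dword is
 * the producer/consumer handshake. The consumer checks it first, then
 * issues rmb() so reads of the remaining completion fields cannot be
 * reordered before the valid-bit check, and finally zeroes the dword
 * so a wrapped ring position is not mistaken for a fresh completion:
 *
 * if (!compl_is_valid(compl)) - return NULL (nothing new)
 * rmb() - read fields only after seeing valid != 0
 * parse(compl) - safe to read the rest of the entry
 * compl_invalidate(compl) - zero 'valid' before advancing the tail
 *
 * (compl_is_valid/compl_invalidate are hypothetical helper names.)
 */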
1181
1182 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1183 {
1184 u32 order = get_order(size);
1185
1186 if (order > 0)
1187 gfp |= __GFP_COMP;
1188 return alloc_pages(gfp, order);
1189 }
1190
1191 /*
1192 * Allocate a page, split it into fragments of size rx_frag_size and post
1193 * them as receive buffers to BE
1194 */
1195 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1196 {
1197 struct be_adapter *adapter = rxo->adapter;
1198 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1199 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1200 struct be_queue_info *rxq = &rxo->q;
1201 struct page *pagep = NULL;
1202 struct be_eth_rx_d *rxd;
1203 u64 page_dmaaddr = 0, frag_dmaaddr;
1204 u32 posted, page_offset = 0;
1205
1206 page_info = &rxo->page_info_tbl[rxq->head];
1207 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1208 if (!pagep) {
1209 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1210 if (unlikely(!pagep)) {
1211 rxo->stats.rx_post_fail++;
1212 break;
1213 }
1214 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1215 0, adapter->big_page_size,
1216 DMA_FROM_DEVICE);
1217 page_info->page_offset = 0;
1218 } else {
1219 get_page(pagep);
1220 page_info->page_offset = page_offset + rx_frag_size;
1221 }
1222 page_offset = page_info->page_offset;
1223 page_info->page = pagep;
1224 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1225 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1226
1227 rxd = queue_head_node(rxq);
1228 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1229 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1230
1231 /* Any space left in the current big page for another frag? */
1232 if ((page_offset + rx_frag_size + rx_frag_size) >
1233 adapter->big_page_size) {
1234 pagep = NULL;
1235 page_info->last_page_user = true;
1236 }
1237
1238 prev_page_info = page_info;
1239 queue_head_inc(rxq);
1240 page_info = &page_info_tbl[rxq->head];
1241 }
1242 if (pagep)
1243 prev_page_info->last_page_user = true;
1244
1245 if (posted) {
1246 atomic_add(posted, &rxq->used);
1247 be_rxq_notify(adapter, rxq->id, posted);
1248 } else if (atomic_read(&rxq->used) == 0) {
1249 /* Let be_worker replenish when memory is available */
1250 rxo->rx_post_starved = true;
1251 }
1252 }
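
/* Worked example (illustrative only): with rx_frag_size = 2048 and an
 * order-1 big page (big_page_size = 8192), one page is carved into 4
 * receive frags at offsets 0, 2048, 4096 and 6144. get_page() takes an
 * extra reference per additional frag, and last_page_user is set on
 * the frag at offset 6144 so the DMA mapping is torn down only when
 * the page's final frag is consumed.
 */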
1253
1254 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1255 {
1256 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1257
1258 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1259 return NULL;
1260
1261 rmb();
1262 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1263
1264 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1265
1266 queue_tail_inc(tx_cq);
1267 return txcp;
1268 }
1269
1270 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1271 {
1272 struct be_queue_info *txq = &adapter->tx_obj.q;
1273 struct be_eth_wrb *wrb;
1274 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1275 struct sk_buff *sent_skb;
1276 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1277 bool unmap_skb_hdr = true;
1278
1279 sent_skb = sent_skbs[txq->tail];
1280 BUG_ON(!sent_skb);
1281 sent_skbs[txq->tail] = NULL;
1282
1283 /* skip header wrb */
1284 queue_tail_inc(txq);
1285
1286 do {
1287 cur_index = txq->tail;
1288 wrb = queue_tail_node(txq);
1289 unmap_tx_frag(&adapter->pdev->dev, wrb,
1290 (unmap_skb_hdr && skb_headlen(sent_skb)));
1291 unmap_skb_hdr = false;
1292
1293 num_wrbs++;
1294 queue_tail_inc(txq);
1295 } while (cur_index != last_index);
1296
1297 atomic_sub(num_wrbs, &txq->used);
1298
1299 kfree_skb(sent_skb);
1300 }
1301
1302 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1303 {
1304 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1305
1306 if (!eqe->evt)
1307 return NULL;
1308
1309 rmb();
1310 eqe->evt = le32_to_cpu(eqe->evt);
1311 queue_tail_inc(&eq_obj->q);
1312 return eqe;
1313 }
1314
1315 static int event_handle(struct be_adapter *adapter,
1316 struct be_eq_obj *eq_obj)
1317 {
1318 struct be_eq_entry *eqe;
1319 u16 num = 0;
1320
1321 while ((eqe = event_get(eq_obj)) != NULL) {
1322 eqe->evt = 0;
1323 num++;
1324 }
1325
1326 /* Deal with any spurious interrupts that come
1327 * without events
1328 */
1329 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1330 if (num)
1331 napi_schedule(&eq_obj->napi);
1332
1333 return num;
1334 }
1335
1336 /* Just read and notify events without processing them.
1337 * Used when destroying event queues */
1338 static void be_eq_clean(struct be_adapter *adapter,
1339 struct be_eq_obj *eq_obj)
1340 {
1341 struct be_eq_entry *eqe;
1342 u16 num = 0;
1343
1344 while ((eqe = event_get(eq_obj)) != NULL) {
1345 eqe->evt = 0;
1346 num++;
1347 }
1348
1349 if (num)
1350 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1351 }
1352
1353 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1354 {
1355 struct be_rx_page_info *page_info;
1356 struct be_queue_info *rxq = &rxo->q;
1357 struct be_queue_info *rx_cq = &rxo->cq;
1358 struct be_rx_compl_info *rxcp;
1359 u16 tail;
1360
1361 /* First cleanup pending rx completions */
1362 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1363 be_rx_compl_discard(adapter, rxo, rxcp);
1364 be_cq_notify(adapter, rx_cq->id, false, 1);
1365 }
1366
1367 /* Then free posted rx buffers that were not used */
1368 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1369 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1370 page_info = get_rx_page_info(adapter, rxo, tail);
1371 put_page(page_info->page);
1372 memset(page_info, 0, sizeof(*page_info));
1373 }
1374 BUG_ON(atomic_read(&rxq->used));
1375 }
1376
1377 static void be_tx_compl_clean(struct be_adapter *adapter)
1378 {
1379 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1380 struct be_queue_info *txq = &adapter->tx_obj.q;
1381 struct be_eth_tx_compl *txcp;
1382 u16 end_idx, cmpl = 0, timeo = 0;
1383 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1384 struct sk_buff *sent_skb;
1385 bool dummy_wrb;
1386
1387 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1388 do {
1389 while ((txcp = be_tx_compl_get(tx_cq))) {
1390 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1391 wrb_index, txcp);
1392 be_tx_compl_process(adapter, end_idx);
1393 cmpl++;
1394 }
1395 if (cmpl) {
1396 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1397 cmpl = 0;
1398 }
1399
1400 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1401 break;
1402
1403 mdelay(1);
1404 } while (true);
1405
1406 if (atomic_read(&txq->used))
1407 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1408 atomic_read(&txq->used));
1409
1410 /* free posted tx for which compls will never arrive */
1411 while (atomic_read(&txq->used)) {
1412 sent_skb = sent_skbs[txq->tail];
1413 end_idx = txq->tail;
1414 index_adv(&end_idx,
1415 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1416 txq->len);
1417 be_tx_compl_process(adapter, end_idx);
1418 }
1419 }
1420
1421 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1422 {
1423 struct be_queue_info *q;
1424
1425 q = &adapter->mcc_obj.q;
1426 if (q->created)
1427 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1428 be_queue_free(adapter, q);
1429
1430 q = &adapter->mcc_obj.cq;
1431 if (q->created)
1432 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1433 be_queue_free(adapter, q);
1434 }
1435
1436 /* Must be called only after TX qs are created as MCC shares TX EQ */
1437 static int be_mcc_queues_create(struct be_adapter *adapter)
1438 {
1439 struct be_queue_info *q, *cq;
1440
1441 /* Alloc MCC compl queue */
1442 cq = &adapter->mcc_obj.cq;
1443 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1444 sizeof(struct be_mcc_compl)))
1445 goto err;
1446
1447 /* Ask BE to create MCC compl queue; share TX's eq */
1448 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1449 goto mcc_cq_free;
1450
1451 /* Alloc MCC queue */
1452 q = &adapter->mcc_obj.q;
1453 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1454 goto mcc_cq_destroy;
1455
1456 /* Ask BE to create MCC queue */
1457 if (be_cmd_mccq_create(adapter, q, cq))
1458 goto mcc_q_free;
1459
1460 return 0;
1461
1462 mcc_q_free:
1463 be_queue_free(adapter, q);
1464 mcc_cq_destroy:
1465 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1466 mcc_cq_free:
1467 be_queue_free(adapter, cq);
1468 err:
1469 return -1;
1470 }
1471
1472 static void be_tx_queues_destroy(struct be_adapter *adapter)
1473 {
1474 struct be_queue_info *q;
1475
1476 q = &adapter->tx_obj.q;
1477 if (q->created)
1478 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1479 be_queue_free(adapter, q);
1480
1481 q = &adapter->tx_obj.cq;
1482 if (q->created)
1483 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1484 be_queue_free(adapter, q);
1485
1486 /* Clear any residual events */
1487 be_eq_clean(adapter, &adapter->tx_eq);
1488
1489 q = &adapter->tx_eq.q;
1490 if (q->created)
1491 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1492 be_queue_free(adapter, q);
1493 }
1494
1495 static int be_tx_queues_create(struct be_adapter *adapter)
1496 {
1497 struct be_queue_info *eq, *q, *cq;
1498
1499 adapter->tx_eq.max_eqd = 0;
1500 adapter->tx_eq.min_eqd = 0;
1501 adapter->tx_eq.cur_eqd = 96;
1502 adapter->tx_eq.enable_aic = false;
1503 /* Alloc Tx Event queue */
1504 eq = &adapter->tx_eq.q;
1505 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1506 return -1;
1507
1508 /* Ask BE to create Tx Event queue */
1509 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1510 goto tx_eq_free;
1511
1512 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1513
1514
1515 /* Alloc TX eth compl queue */
1516 cq = &adapter->tx_obj.cq;
1517 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1518 sizeof(struct be_eth_tx_compl)))
1519 goto tx_eq_destroy;
1520
1521 /* Ask BE to create Tx eth compl queue */
1522 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1523 goto tx_cq_free;
1524
1525 /* Alloc TX eth queue */
1526 q = &adapter->tx_obj.q;
1527 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1528 goto tx_cq_destroy;
1529
1530 /* Ask BE to create Tx eth queue */
1531 if (be_cmd_txq_create(adapter, q, cq))
1532 goto tx_q_free;
1533 return 0;
1534
1535 tx_q_free:
1536 be_queue_free(adapter, q);
1537 tx_cq_destroy:
1538 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1539 tx_cq_free:
1540 be_queue_free(adapter, cq);
1541 tx_eq_destroy:
1542 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1543 tx_eq_free:
1544 be_queue_free(adapter, eq);
1545 return -1;
1546 }
1547
1548 static void be_rx_queues_destroy(struct be_adapter *adapter)
1549 {
1550 struct be_queue_info *q;
1551 struct be_rx_obj *rxo;
1552 int i;
1553
1554 for_all_rx_queues(adapter, rxo, i) {
1555 q = &rxo->q;
1556 if (q->created) {
1557 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1558 /* After the rxq is invalidated, wait for a grace time
1559 * of 1ms for all dma to end and the flush compl to
1560 * arrive
1561 */
1562 mdelay(1);
1563 be_rx_q_clean(adapter, rxo);
1564 }
1565 be_queue_free(adapter, q);
1566
1567 q = &rxo->cq;
1568 if (q->created)
1569 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1570 be_queue_free(adapter, q);
1571
1572 /* Clear any residual events */
1573 q = &rxo->rx_eq.q;
1574 if (q->created) {
1575 be_eq_clean(adapter, &rxo->rx_eq);
1576 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1577 }
1578 be_queue_free(adapter, q);
1579 }
1580 }
1581
1582 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1583 {
1584 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1585 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1586 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1587 } else {
1588 dev_warn(&adapter->pdev->dev,
1589 "No support for multiple RX queues\n");
1590 return 1;
1591 }
1592 }
1593
1594 static int be_rx_queues_create(struct be_adapter *adapter)
1595 {
1596 struct be_queue_info *eq, *q, *cq;
1597 struct be_rx_obj *rxo;
1598 int rc, i;
1599
1600 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1601 msix_enabled(adapter) ?
1602 adapter->num_msix_vec - 1 : 1);
1603 if (adapter->num_rx_qs != MAX_RX_QS)
1604 dev_warn(&adapter->pdev->dev,
1605 "Can create only %d RX queues", adapter->num_rx_qs);
1606
1607 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1608 for_all_rx_queues(adapter, rxo, i) {
1609 rxo->adapter = adapter;
1610 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1611 rxo->rx_eq.enable_aic = true;
1612
1613 /* EQ */
1614 eq = &rxo->rx_eq.q;
1615 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1616 sizeof(struct be_eq_entry));
1617 if (rc)
1618 goto err;
1619
1620 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1621 if (rc)
1622 goto err;
1623
1624 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1625
1626 /* CQ */
1627 cq = &rxo->cq;
1628 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1629 sizeof(struct be_eth_rx_compl));
1630 if (rc)
1631 goto err;
1632
1633 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1634 if (rc)
1635 goto err;
1636 /* Rx Q */
1637 q = &rxo->q;
1638 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1639 sizeof(struct be_eth_rx_d));
1640 if (rc)
1641 goto err;
1642
1643 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1644 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1645 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1646 if (rc)
1647 goto err;
1648 }
1649
1650 if (be_multi_rxq(adapter)) {
1651 u8 rsstable[MAX_RSS_QS];
1652
1653 for_all_rss_queues(adapter, rxo, i)
1654 rsstable[i] = rxo->rss_id;
1655
1656 rc = be_cmd_rss_config(adapter, rsstable,
1657 adapter->num_rx_qs - 1);
1658 if (rc)
1659 goto err;
1660 }
1661
1662 return 0;
1663 err:
1664 be_rx_queues_destroy(adapter);
1665 return -1;
1666 }
1667
1668 static bool event_peek(struct be_eq_obj *eq_obj)
1669 {
1670 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1671 if (!eqe->evt)
1672 return false;
1673 else
1674 return true;
1675 }
1676
1677 static irqreturn_t be_intx(int irq, void *dev)
1678 {
1679 struct be_adapter *adapter = dev;
1680 struct be_rx_obj *rxo;
1681 int isr, i, tx = 0, rx = 0;
1682
1683 if (lancer_chip(adapter)) {
1684 if (event_peek(&adapter->tx_eq))
1685 tx = event_handle(adapter, &adapter->tx_eq);
1686 for_all_rx_queues(adapter, rxo, i) {
1687 if (event_peek(&rxo->rx_eq))
1688 rx |= event_handle(adapter, &rxo->rx_eq);
1689 }
1690
1691 if (!(tx || rx))
1692 return IRQ_NONE;
1693
1694 } else {
1695 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1696 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1697 if (!isr)
1698 return IRQ_NONE;
1699
1700 if ((1 << adapter->tx_eq.eq_idx & isr))
1701 event_handle(adapter, &adapter->tx_eq);
1702
1703 for_all_rx_queues(adapter, rxo, i) {
1704 if ((1 << rxo->rx_eq.eq_idx & isr))
1705 event_handle(adapter, &rxo->rx_eq);
1706 }
1707 }
1708
1709 return IRQ_HANDLED;
1710 }
1711
1712 static irqreturn_t be_msix_rx(int irq, void *dev)
1713 {
1714 struct be_rx_obj *rxo = dev;
1715 struct be_adapter *adapter = rxo->adapter;
1716
1717 event_handle(adapter, &rxo->rx_eq);
1718
1719 return IRQ_HANDLED;
1720 }
1721
1722 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1723 {
1724 struct be_adapter *adapter = dev;
1725
1726 event_handle(adapter, &adapter->tx_eq);
1727
1728 return IRQ_HANDLED;
1729 }
1730
1731 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1732 {
1733 return (rxcp->tcpf && !rxcp->err) ? true : false;
1734 }
1735
1736 static int be_poll_rx(struct napi_struct *napi, int budget)
1737 {
1738 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1739 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1740 struct be_adapter *adapter = rxo->adapter;
1741 struct be_queue_info *rx_cq = &rxo->cq;
1742 struct be_rx_compl_info *rxcp;
1743 u32 work_done;
1744
1745 rxo->stats.rx_polls++;
1746 for (work_done = 0; work_done < budget; work_done++) {
1747 rxcp = be_rx_compl_get(rxo);
1748 if (!rxcp)
1749 break;
1750
1751 /* Ignore flush completions */
1752 if (rxcp->num_rcvd) {
1753 if (do_gro(rxcp))
1754 be_rx_compl_process_gro(adapter, rxo, rxcp);
1755 else
1756 be_rx_compl_process(adapter, rxo, rxcp);
1757 }
1758 be_rx_stats_update(rxo, rxcp);
1759 }
1760
1761 /* Refill the queue */
1762 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1763 be_post_rx_frags(rxo, GFP_ATOMIC);
1764
1765 /* All consumed */
1766 if (work_done < budget) {
1767 napi_complete(napi);
1768 be_cq_notify(adapter, rx_cq->id, true, work_done);
1769 } else {
1770 /* More to be consumed; continue with interrupts disabled */
1771 be_cq_notify(adapter, rx_cq->id, false, work_done);
1772 }
1773 return work_done;
1774 }
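
/* Note (illustrative only): this follows the standard NAPI contract.
 * If fewer than 'budget' completions were consumed the poll is done,
 * so napi_complete() is called and the CQ is re-armed (arm == true);
 * otherwise the CQ is only acknowledged (arm == false) and NAPI will
 * invoke be_poll_rx() again without re-enabling the event interrupt.
 */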
1775
1776 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1777 * For TX/MCC we don't honour budget; consume everything
1778 */
1779 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1780 {
1781 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1782 struct be_adapter *adapter =
1783 container_of(tx_eq, struct be_adapter, tx_eq);
1784 struct be_queue_info *txq = &adapter->tx_obj.q;
1785 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1786 struct be_eth_tx_compl *txcp;
1787 int tx_compl = 0, mcc_compl, status = 0;
1788 u16 end_idx;
1789
1790 while ((txcp = be_tx_compl_get(tx_cq))) {
1791 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1792 wrb_index, txcp);
1793 be_tx_compl_process(adapter, end_idx);
1794 tx_compl++;
1795 }
1796
1797 mcc_compl = be_process_mcc(adapter, &status);
1798
1799 napi_complete(napi);
1800
1801 if (mcc_compl) {
1802 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1803 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1804 }
1805
1806 if (tx_compl) {
1807 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1808
1809 /* As Tx wrbs have been freed up, wake up netdev queue if
1810 * it was stopped due to lack of tx wrbs.
1811 */
1812 if (netif_queue_stopped(adapter->netdev) &&
1813 atomic_read(&txq->used) < txq->len / 2) {
1814 netif_wake_queue(adapter->netdev);
1815 }
1816
1817 tx_stats(adapter)->be_tx_events++;
1818 tx_stats(adapter)->be_tx_compl += tx_compl;
1819 }
1820
1821 return 1;
1822 }
1823
1824 void be_detect_dump_ue(struct be_adapter *adapter)
1825 {
1826 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1827 u32 i;
1828
1829 pci_read_config_dword(adapter->pdev,
1830 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1831 pci_read_config_dword(adapter->pdev,
1832 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1833 pci_read_config_dword(adapter->pdev,
1834 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1835 pci_read_config_dword(adapter->pdev,
1836 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1837
1838 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1839 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1840
1841 if (ue_status_lo || ue_status_hi) {
1842 adapter->ue_detected = true;
1843 adapter->eeh_err = true;
1844 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1845 }
1846
1847 if (ue_status_lo) {
1848 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1849 if (ue_status_lo & 1)
1850 dev_err(&adapter->pdev->dev,
1851 "UE: %s bit set\n", ue_status_low_desc[i]);
1852 }
1853 }
1854 if (ue_status_hi) {
1855 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1856 if (ue_status_hi & 1)
1857 dev_err(&adapter->pdev->dev,
1858 "UE: %s bit set\n", ue_status_hi_desc[i]);
1859 }
1860 }
1861
1862 }
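
/* Worked example (illustrative only): if, after masking, ue_status_lo
 * ends up as 0x22 (bits 1 and 5 set), the loop above prints the
 * descriptors at indexes 1 and 5 of ue_status_low_desc[], i.e.
 * "UE: CTX bit set" and "UE: MPU bit set".
 */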
1863
1864 static void be_worker(struct work_struct *work)
1865 {
1866 struct be_adapter *adapter =
1867 container_of(work, struct be_adapter, work.work);
1868 struct be_rx_obj *rxo;
1869 int i;
1870
1871 if (!adapter->ue_detected && !lancer_chip(adapter))
1872 be_detect_dump_ue(adapter);
1873
1874 /* when interrupts are not yet enabled, just reap any pending
1875 * mcc completions */
1876 if (!netif_running(adapter->netdev)) {
1877 int mcc_compl, status = 0;
1878
1879 mcc_compl = be_process_mcc(adapter, &status);
1880
1881 if (mcc_compl) {
1882 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1883 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1884 }
1885
1886 goto reschedule;
1887 }
1888
1889 if (!adapter->stats_cmd_sent)
1890 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1891
1892 be_tx_rate_update(adapter);
1893
1894 for_all_rx_queues(adapter, rxo, i) {
1895 be_rx_rate_update(rxo);
1896 be_rx_eqd_update(adapter, rxo);
1897
1898 if (rxo->rx_post_starved) {
1899 rxo->rx_post_starved = false;
1900 be_post_rx_frags(rxo, GFP_KERNEL);
1901 }
1902 }
1903
1904 reschedule:
1905 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1906 }
1907
1908 static void be_msix_disable(struct be_adapter *adapter)
1909 {
1910 if (msix_enabled(adapter)) {
1911 pci_disable_msix(adapter->pdev);
1912 adapter->num_msix_vec = 0;
1913 }
1914 }
1915
1916 static void be_msix_enable(struct be_adapter *adapter)
1917 {
1918 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
1919 int i, status, num_vec;
1920
1921 num_vec = be_num_rxqs_want(adapter) + 1;
1922
1923 for (i = 0; i < num_vec; i++)
1924 adapter->msix_entries[i].entry = i;
1925
1926 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
1927 if (status == 0) {
1928 goto done;
1929 } else if (status >= BE_MIN_MSIX_VECTORS) {
1930 num_vec = status;
1931 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1932 num_vec) == 0)
1933 goto done;
1934 }
1935 return;
1936 done:
1937 adapter->num_msix_vec = num_vec;
1938 return;
1939 }
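
/* Note (illustrative only): this retry relies on the old
 * pci_enable_msix() contract, where a positive return value is the
 * number of vectors the platform could actually allocate. The code
 * first asks for be_num_rxqs_want() + 1 vectors (one per RX EQ plus
 * one shared by TX and MCC) and, if that fails with at least
 * BE_MIN_MSIX_VECTORS available, retries with the reduced count before
 * silently falling back to INTx.
 */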
1940
1941 static void be_sriov_enable(struct be_adapter *adapter)
1942 {
1943 be_check_sriov_fn_type(adapter);
1944 #ifdef CONFIG_PCI_IOV
1945 if (be_physfn(adapter) && num_vfs) {
1946 int status;
1947
1948 status = pci_enable_sriov(adapter->pdev, num_vfs);
1949 adapter->sriov_enabled = status ? false : true;
1950 }
1951 #endif
1952 }
1953
1954 static void be_sriov_disable(struct be_adapter *adapter)
1955 {
1956 #ifdef CONFIG_PCI_IOV
1957 if (adapter->sriov_enabled) {
1958 pci_disable_sriov(adapter->pdev);
1959 adapter->sriov_enabled = false;
1960 }
1961 #endif
1962 }
1963
1964 static inline int be_msix_vec_get(struct be_adapter *adapter,
1965 struct be_eq_obj *eq_obj)
1966 {
1967 return adapter->msix_entries[eq_obj->eq_idx].vector;
1968 }
1969
1970 static int be_request_irq(struct be_adapter *adapter,
1971 struct be_eq_obj *eq_obj,
1972 void *handler, char *desc, void *context)
1973 {
1974 struct net_device *netdev = adapter->netdev;
1975 int vec;
1976
1977 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1978 vec = be_msix_vec_get(adapter, eq_obj);
1979 return request_irq(vec, handler, 0, eq_obj->desc, context);
1980 }
1981
1982 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1983 void *context)
1984 {
1985 int vec = be_msix_vec_get(adapter, eq_obj);
1986 free_irq(vec, context);
1987 }
1988
1989 static int be_msix_register(struct be_adapter *adapter)
1990 {
1991 struct be_rx_obj *rxo;
1992 int status, i;
1993 char qname[10];
1994
1995 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1996 adapter);
1997 if (status)
1998 goto err;
1999
2000 for_all_rx_queues(adapter, rxo, i) {
2001 sprintf(qname, "rxq%d", i);
2002 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2003 qname, rxo);
2004 if (status)
2005 goto err_msix;
2006 }
2007
2008 return 0;
2009
2010 err_msix:
2011 be_free_irq(adapter, &adapter->tx_eq, adapter);
2012
2013 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2014 be_free_irq(adapter, &rxo->rx_eq, rxo);
2015
2016 err:
2017 dev_warn(&adapter->pdev->dev,
2018 "MSIX Request IRQ failed - err %d\n", status);
2019 be_msix_disable(adapter);
2020 return status;
2021 }
2022
2023 static int be_irq_register(struct be_adapter *adapter)
2024 {
2025 struct net_device *netdev = adapter->netdev;
2026 int status;
2027
2028 if (msix_enabled(adapter)) {
2029 status = be_msix_register(adapter);
2030 if (status == 0)
2031 goto done;
2032 /* INTx is not supported for VF */
2033 if (!be_physfn(adapter))
2034 return status;
2035 }
2036
2037 /* INTx */
2038 netdev->irq = adapter->pdev->irq;
2039 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2040 adapter);
2041 if (status) {
2042 dev_err(&adapter->pdev->dev,
2043 "INTx request IRQ failed - err %d\n", status);
2044 return status;
2045 }
2046 done:
2047 adapter->isr_registered = true;
2048 return 0;
2049 }
2050
2051 static void be_irq_unregister(struct be_adapter *adapter)
2052 {
2053 struct net_device *netdev = adapter->netdev;
2054 struct be_rx_obj *rxo;
2055 int i;
2056
2057 if (!adapter->isr_registered)
2058 return;
2059
2060 /* INTx */
2061 if (!msix_enabled(adapter)) {
2062 free_irq(netdev->irq, adapter);
2063 goto done;
2064 }
2065
2066 /* MSIx */
2067 be_free_irq(adapter, &adapter->tx_eq, adapter);
2068
2069 for_all_rx_queues(adapter, rxo, i)
2070 be_free_irq(adapter, &rxo->rx_eq, rxo);
2071
2072 done:
2073 adapter->isr_registered = false;
2074 }
2075
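/* Quiesce the interface: stop async MCC processing, mask interrupts,
 * disable NAPI, synchronize and free the IRQs, and wait for pending
 * tx completions so every tx skb is freed before teardown.
 */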
2076 static int be_close(struct net_device *netdev)
2077 {
2078 struct be_adapter *adapter = netdev_priv(netdev);
2079 struct be_rx_obj *rxo;
2080 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2081 int vec, i;
2082
2083 be_async_mcc_disable(adapter);
2084
2085 netif_carrier_off(netdev);
2086 adapter->link_up = false;
2087
2088 if (!lancer_chip(adapter))
2089 be_intr_set(adapter, false);
2090
2091 for_all_rx_queues(adapter, rxo, i)
2092 napi_disable(&rxo->rx_eq.napi);
2093
2094 napi_disable(&tx_eq->napi);
2095
2096 if (lancer_chip(adapter)) {
2097 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2098 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2099 for_all_rx_queues(adapter, rxo, i)
2100 be_cq_notify(adapter, rxo->cq.id, false, 0);
2101 }
2102
2103 if (msix_enabled(adapter)) {
2104 vec = be_msix_vec_get(adapter, tx_eq);
2105 synchronize_irq(vec);
2106
2107 for_all_rx_queues(adapter, rxo, i) {
2108 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2109 synchronize_irq(vec);
2110 }
2111 } else {
2112 synchronize_irq(netdev->irq);
2113 }
2114 be_irq_unregister(adapter);
2115
2116 /* Wait for all pending tx completions to arrive so that
2117 * all tx skbs are freed.
2118 */
2119 be_tx_compl_clean(adapter);
2120
2121 return 0;
2122 }
2123
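/* Bring the interface up: post rx buffers, enable NAPI, register and
 * unmask interrupts, arm the event/completion queues, re-enable async
 * MCC processing, then refresh link state and (on the PF) vlan and
 * flow-control configuration.
 */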
2124 static int be_open(struct net_device *netdev)
2125 {
2126 struct be_adapter *adapter = netdev_priv(netdev);
2127 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2128 struct be_rx_obj *rxo;
2129 bool link_up;
2130 int status, i;
2131 u8 mac_speed;
2132 u16 link_speed;
2133
2134 for_all_rx_queues(adapter, rxo, i) {
2135 be_post_rx_frags(rxo, GFP_KERNEL);
2136 napi_enable(&rxo->rx_eq.napi);
2137 }
2138 napi_enable(&tx_eq->napi);
2139
2140 be_irq_register(adapter);
2141
2142 if (!lancer_chip(adapter))
2143 be_intr_set(adapter, true);
2144
2145 /* The evt queues are created in unarmed state; arm them */
2146 for_all_rx_queues(adapter, rxo, i) {
2147 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2148 be_cq_notify(adapter, rxo->cq.id, true, 0);
2149 }
2150 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2151
2152 /* Now that interrupts are on we can process async mcc */
2153 be_async_mcc_enable(adapter);
2154
2155 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2156 &link_speed);
2157 if (status)
2158 goto err;
2159 be_link_status_update(adapter, link_up);
2160
2161 if (be_physfn(adapter)) {
2162 status = be_vid_config(adapter, false, 0);
2163 if (status)
2164 goto err;
2165
2166 status = be_cmd_set_flow_control(adapter,
2167 adapter->tx_fc, adapter->rx_fc);
2168 if (status)
2169 goto err;
2170 }
2171
2172 return 0;
2173 err:
2174 be_close(adapter->netdev);
2175 return -EIO;
2176 }
2177
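/* Program magic-packet wake-on-LAN in firmware and toggle PCI wake
 * for the D3hot/D3cold states; a zeroed MAC disarms it.
 */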
2178 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2179 {
2180 struct be_dma_mem cmd;
2181 int status = 0;
2182 u8 mac[ETH_ALEN];
2183
2184 memset(mac, 0, ETH_ALEN);
2185
2186 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2187 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2188 GFP_KERNEL);
2189 if (cmd.va == NULL)
2190 return -1;
2191 memset(cmd.va, 0, cmd.size);
2192
2193 if (enable) {
2194 status = pci_write_config_dword(adapter->pdev,
2195 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2196 if (status) {
2197 dev_err(&adapter->pdev->dev,
2198 "Could not enable Wake-on-lan\n");
2199 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2200 cmd.dma);
2201 return status;
2202 }
2203 status = be_cmd_enable_magic_wol(adapter,
2204 adapter->netdev->dev_addr, &cmd);
2205 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2206 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2207 } else {
2208 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2209 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2210 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2211 }
2212
2213 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2214 return status;
2215 }
2216
2217 /*
2218 * Generate a seed MAC address from the PF MAC Address using jhash.
2219 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2220 * These addresses are programmed in the ASIC by the PF and the VF driver
2221 * queries for the MAC address during its probe.
2222 */
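/* For example (illustrative values, not a real seed): a seed of
 * 02:00:11:22:33:40 would give VF0 02:00:11:22:33:40, VF1
 * 02:00:11:22:33:41, and so on; only mac[5] is incremented, so there
 * is no carry into the higher octets.
 */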
2223 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2224 {
2225 u32 vf = 0;
2226 int status = 0;
2227 u8 mac[ETH_ALEN];
2228
2229 be_vf_eth_addr_generate(adapter, mac);
2230
2231 for (vf = 0; vf < num_vfs; vf++) {
2232 status = be_cmd_pmac_add(adapter, mac,
2233 adapter->vf_cfg[vf].vf_if_handle,
2234 &adapter->vf_cfg[vf].vf_pmac_id,
2235 vf + 1);
2236 if (status)
2237 dev_err(&adapter->pdev->dev,
2238 "Mac address add failed for VF %d\n", vf);
2239 else
2240 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2241
2242 mac[5] += 1;
2243 }
2244 return status;
2245 }
2246
2247 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2248 {
2249 u32 vf;
2250
2251 for (vf = 0; vf < num_vfs; vf++) {
2252 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2253 be_cmd_pmac_del(adapter,
2254 adapter->vf_cfg[vf].vf_if_handle,
2255 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2256 }
2257 }
2258
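/* One-time data path setup: create the PF interface (and one interface
 * per VF when SR-IOV is on), then the tx, rx and MCC queues, unwinding
 * in reverse order on any failure.
 */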
2259 static int be_setup(struct be_adapter *adapter)
2260 {
2261 struct net_device *netdev = adapter->netdev;
2262 u32 cap_flags, en_flags, vf = 0;
2263 int status;
2264 u8 mac[ETH_ALEN];
2265
2266 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2267 BE_IF_FLAGS_BROADCAST |
2268 BE_IF_FLAGS_MULTICAST;
2269
2270 if (be_physfn(adapter)) {
2271 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2272 BE_IF_FLAGS_PROMISCUOUS |
2273 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2274 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2275
2276 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2277 cap_flags |= BE_IF_FLAGS_RSS;
2278 en_flags |= BE_IF_FLAGS_RSS;
2279 }
2280 }
2281
2282 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2283 netdev->dev_addr, false/* pmac_invalid */,
2284 &adapter->if_handle, &adapter->pmac_id, 0);
2285 if (status != 0)
2286 goto do_none;
2287
2288 if (be_physfn(adapter)) {
2289 if (adapter->sriov_enabled) {
2290 while (vf < num_vfs) {
2291 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2292 BE_IF_FLAGS_BROADCAST;
2293 status = be_cmd_if_create(adapter, cap_flags,
2294 en_flags, mac, true,
2295 &adapter->vf_cfg[vf].vf_if_handle,
2296 NULL, vf+1);
2297 if (status) {
2298 dev_err(&adapter->pdev->dev,
2299 "Interface Create failed for VF %d\n",
2300 vf);
2301 goto if_destroy;
2302 }
2303 adapter->vf_cfg[vf].vf_pmac_id =
2304 BE_INVALID_PMAC_ID;
2305 vf++;
2306 }
2307 }
2308 } else {
2309 status = be_cmd_mac_addr_query(adapter, mac,
2310 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2311 if (!status) {
2312 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2313 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2314 }
2315 }
2316
2317 status = be_tx_queues_create(adapter);
2318 if (status != 0)
2319 goto if_destroy;
2320
2321 status = be_rx_queues_create(adapter);
2322 if (status != 0)
2323 goto tx_qs_destroy;
2324
2325 status = be_mcc_queues_create(adapter);
2326 if (status != 0)
2327 goto rx_qs_destroy;
2328
2329 adapter->link_speed = -1;
2330
2331 return 0;
2332
2333 rx_qs_destroy:
2334 be_rx_queues_destroy(adapter);
2335 tx_qs_destroy:
2336 be_tx_queues_destroy(adapter);
2337 if_destroy:
2338 if (be_physfn(adapter) && adapter->sriov_enabled)
2339 for (vf = 0; vf < num_vfs; vf++)
2340 if (adapter->vf_cfg[vf].vf_if_handle)
2341 be_cmd_if_destroy(adapter,
2342 adapter->vf_cfg[vf].vf_if_handle,
2343 vf + 1);
2344 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2345 do_none:
2346 return status;
2347 }
2348
2349 static int be_clear(struct be_adapter *adapter)
2350 {
2351 int vf;
2352
2353 if (be_physfn(adapter) && adapter->sriov_enabled)
2354 be_vf_eth_addr_rem(adapter);
2355
2356 be_mcc_queues_destroy(adapter);
2357 be_rx_queues_destroy(adapter);
2358 be_tx_queues_destroy(adapter);
2359 adapter->eq_next_idx = 0;
2360
2361 if (be_physfn(adapter) && adapter->sriov_enabled)
2362 for (vf = 0; vf < num_vfs; vf++)
2363 if (adapter->vf_cfg[vf].vf_if_handle)
2364 be_cmd_if_destroy(adapter,
2365 adapter->vf_cfg[vf].vf_if_handle,
2366 vf + 1);
2367
2368 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2369
2370 /* tell fw we're done with firing cmds */
2371 be_cmd_fw_clean(adapter);
2372 return 0;
2373 }
2374
2376 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2377 static bool be_flash_redboot(struct be_adapter *adapter,
2378 const u8 *p, u32 img_start, int image_size,
2379 int hdr_size)
2380 {
2381 u32 crc_offset;
2382 u8 flashed_crc[4];
2383 int status;
2384
2385 crc_offset = hdr_size + img_start + image_size - 4;
2386
2387 p += crc_offset;
2388
2389 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2390 (image_size - 4));
2391 if (status) {
2392 dev_err(&adapter->pdev->dev,
2393 "could not get crc from flash, not flashing redboot\n");
2394 return false;
2395 }
2396
2397 /* update redboot only if crc does not match */
2398 if (!memcmp(flashed_crc, p, 4))
2399 return false;
2400 else
2401 return true;
2402 }
2403
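/* Walk the per-generation flash layout table and write each component
 * from the UFI image: NCSI is skipped on firmware that string-compares
 * below 3.102.148.0, redboot is skipped when its CRC already matches,
 * and each component is sent in 32KB chunks with FLASHROM_OPER_FLASH
 * issued only on the final chunk to commit it.
 */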
2404 static int be_flash_data(struct be_adapter *adapter,
2405 const struct firmware *fw,
2406 struct be_dma_mem *flash_cmd, int num_of_images)
2408 {
2409 int status = 0, i, filehdr_size = 0;
2410 u32 total_bytes = 0, flash_op;
2411 int num_bytes;
2412 const u8 *p = fw->data;
2413 struct be_cmd_write_flashrom *req = flash_cmd->va;
2414 const struct flash_comp *pflashcomp;
2415 int num_comp;
2416
2417 static const struct flash_comp gen3_flash_types[9] = {
2418 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2419 FLASH_IMAGE_MAX_SIZE_g3},
2420 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2421 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2422 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2423 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2424 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2425 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2426 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2427 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2428 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2429 FLASH_IMAGE_MAX_SIZE_g3},
2430 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2431 FLASH_IMAGE_MAX_SIZE_g3},
2432 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2433 FLASH_IMAGE_MAX_SIZE_g3},
2434 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2435 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2436 };
2437 static const struct flash_comp gen2_flash_types[8] = {
2438 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2439 FLASH_IMAGE_MAX_SIZE_g2},
2440 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2441 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2442 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2443 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2444 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2445 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2446 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2447 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2448 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2449 FLASH_IMAGE_MAX_SIZE_g2},
2450 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2451 FLASH_IMAGE_MAX_SIZE_g2},
2452 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2453 FLASH_IMAGE_MAX_SIZE_g2}
2454 };
2455
2456 if (adapter->generation == BE_GEN3) {
2457 pflashcomp = gen3_flash_types;
2458 filehdr_size = sizeof(struct flash_file_hdr_g3);
2459 num_comp = ARRAY_SIZE(gen3_flash_types);
2460 } else {
2461 pflashcomp = gen2_flash_types;
2462 filehdr_size = sizeof(struct flash_file_hdr_g2);
2463 num_comp = ARRAY_SIZE(gen2_flash_types);
2464 }
2465 for (i = 0; i < num_comp; i++) {
2466 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2467 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2468 continue;
2469 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2470 (!be_flash_redboot(adapter, fw->data,
2471 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2472 (num_of_images * sizeof(struct image_hdr)))))
2473 continue;
2474 p = fw->data;
2475 p += filehdr_size + pflashcomp[i].offset
2476 + (num_of_images * sizeof(struct image_hdr));
2477 if (p + pflashcomp[i].size > fw->data + fw->size)
2478 return -1;
2479 total_bytes = pflashcomp[i].size;
2480 while (total_bytes) {
2481 if (total_bytes > 32*1024)
2482 num_bytes = 32*1024;
2483 else
2484 num_bytes = total_bytes;
2485 total_bytes -= num_bytes;
2486
2487 if (!total_bytes)
2488 flash_op = FLASHROM_OPER_FLASH;
2489 else
2490 flash_op = FLASHROM_OPER_SAVE;
2491 memcpy(req->params.data_buf, p, num_bytes);
2492 p += num_bytes;
2493 status = be_cmd_write_flashrom(adapter, flash_cmd,
2494 pflashcomp[i].optype, flash_op, num_bytes);
2495 if (status) {
2496 dev_err(&adapter->pdev->dev,
2497 "cmd to write to flash rom failed.\n");
2498 return -1;
2499 }
2500 yield();
2501 }
2502 }
2503 return 0;
2504 }
2505
2506 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2507 {
2508 if (fhdr == NULL)
2509 return 0;
2510 if (fhdr->build[0] == '3')
2511 return BE_GEN3;
2512 else if (fhdr->build[0] == '2')
2513 return BE_GEN2;
2514 else
2515 return 0;
2516 }
2517
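/* ethtool firmware-flash entry point: requires the interface to be up,
 * loads the UFI file via request_firmware(), and dispatches to
 * be_flash_data() once the image generation matches the adapter.
 */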
2518 int be_load_fw(struct be_adapter *adapter, u8 *func)
2519 {
2520 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2521 const struct firmware *fw;
2522 struct flash_file_hdr_g2 *fhdr;
2523 struct flash_file_hdr_g3 *fhdr3;
2524 struct image_hdr *img_hdr_ptr = NULL;
2525 struct be_dma_mem flash_cmd;
2526 int status, i = 0, num_imgs = 0;
2527 const u8 *p;
2528
2529 if (!netif_running(adapter->netdev)) {
2530 dev_err(&adapter->pdev->dev,
2531 "Firmware load not allowed (interface is down)\n");
2532 return -EPERM;
2533 }
2534
2535 strlcpy(fw_file, func, sizeof(fw_file));
2536
2537 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2538 if (status)
2539 goto fw_exit;
2540
2541 p = fw->data;
2542 fhdr = (struct flash_file_hdr_g2 *) p;
2543 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2544
2545 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2546 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2547 &flash_cmd.dma, GFP_KERNEL);
2548 if (!flash_cmd.va) {
2549 status = -ENOMEM;
2550 dev_err(&adapter->pdev->dev,
2551 "Memory allocation failure while flashing\n");
2552 goto fw_exit;
2553 }
2554
2555 if ((adapter->generation == BE_GEN3) &&
2556 (get_ufigen_type(fhdr) == BE_GEN3)) {
2557 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2558 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2559 for (i = 0; i < num_imgs; i++) {
2560 img_hdr_ptr = (struct image_hdr *) (fw->data +
2561 (sizeof(struct flash_file_hdr_g3) +
2562 i * sizeof(struct image_hdr)));
2563 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2564 status = be_flash_data(adapter, fw, &flash_cmd,
2565 num_imgs);
2566 }
2567 } else if ((adapter->generation == BE_GEN2) &&
2568 (get_ufigen_type(fhdr) == BE_GEN2)) {
2569 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2570 } else {
2571 dev_err(&adapter->pdev->dev,
2572 "UFI and Interface are not compatible for flashing\n");
2573 status = -1;
2574 }
2575
2576 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2577 flash_cmd.dma);
2578 if (status) {
2579 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2580 goto fw_exit;
2581 }
2582
2583 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2584
2585 fw_exit:
2586 release_firmware(fw);
2587 return status;
2588 }
2589
2590 static const struct net_device_ops be_netdev_ops = {
2591 .ndo_open = be_open,
2592 .ndo_stop = be_close,
2593 .ndo_start_xmit = be_xmit,
2594 .ndo_set_rx_mode = be_set_multicast_list,
2595 .ndo_set_mac_address = be_mac_addr_set,
2596 .ndo_change_mtu = be_change_mtu,
2597 .ndo_validate_addr = eth_validate_addr,
2598 .ndo_vlan_rx_register = be_vlan_register,
2599 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2600 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2601 .ndo_set_vf_mac = be_set_vf_mac,
2602 .ndo_set_vf_vlan = be_set_vf_vlan,
2603 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2604 .ndo_get_vf_config = be_get_vf_config
2605 };
2606
2607 static void be_netdev_init(struct net_device *netdev)
2608 {
2609 struct be_adapter *adapter = netdev_priv(netdev);
2610 struct be_rx_obj *rxo;
2611 int i;
2612
2613 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2614 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2615 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2616 NETIF_F_GRO | NETIF_F_TSO6;
2617
2618 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2619 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2620
2621 if (lancer_chip(adapter))
2622 netdev->vlan_features |= NETIF_F_TSO6;
2623
2624 netdev->flags |= IFF_MULTICAST;
2625
2626 adapter->rx_csum = true;
2627
2628 /* Default settings for Rx and Tx flow control */
2629 adapter->rx_fc = true;
2630 adapter->tx_fc = true;
2631
2632 netif_set_gso_max_size(netdev, 65535);
2633
2634 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2635
2636 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2637
2638 for_all_rx_queues(adapter, rxo, i)
2639 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2640 BE_NAPI_WEIGHT);
2641
2642 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2643 BE_NAPI_WEIGHT);
2644 }
2645
2646 static void be_unmap_pci_bars(struct be_adapter *adapter)
2647 {
2648 if (adapter->csr)
2649 iounmap(adapter->csr);
2650 if (adapter->db)
2651 iounmap(adapter->db);
2652 if (adapter->pcicfg && be_physfn(adapter))
2653 iounmap(adapter->pcicfg);
2654 }
2655
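/* BAR layout differs by chip: Lancer exposes only a doorbell BAR 0;
 * on BE2/BE3 the PF maps CSR from BAR 2 and the doorbell from BAR 4
 * (BAR 0 for gen-3 VFs), with pcicfg in BAR 1 (gen 2) or BAR 0
 * (gen 3), while VFs reach pcicfg at a fixed offset from the doorbell.
 */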
2656 static int be_map_pci_bars(struct be_adapter *adapter)
2657 {
2658 u8 __iomem *addr;
2659 int pcicfg_reg, db_reg;
2660
2661 if (lancer_chip(adapter)) {
2662 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2663 pci_resource_len(adapter->pdev, 0));
2664 if (addr == NULL)
2665 return -ENOMEM;
2666 adapter->db = addr;
2667 return 0;
2668 }
2669
2670 if (be_physfn(adapter)) {
2671 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2672 pci_resource_len(adapter->pdev, 2));
2673 if (addr == NULL)
2674 return -ENOMEM;
2675 adapter->csr = addr;
2676 }
2677
2678 if (adapter->generation == BE_GEN2) {
2679 pcicfg_reg = 1;
2680 db_reg = 4;
2681 } else {
2682 pcicfg_reg = 0;
2683 if (be_physfn(adapter))
2684 db_reg = 4;
2685 else
2686 db_reg = 0;
2687 }
2688 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2689 pci_resource_len(adapter->pdev, db_reg));
2690 if (addr == NULL)
2691 goto pci_map_err;
2692 adapter->db = addr;
2693
2694 if (be_physfn(adapter)) {
2695 addr = ioremap_nocache(
2696 pci_resource_start(adapter->pdev, pcicfg_reg),
2697 pci_resource_len(adapter->pdev, pcicfg_reg));
2698 if (addr == NULL)
2699 goto pci_map_err;
2700 adapter->pcicfg = addr;
2701 } else
2702 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2703
2704 return 0;
2705 pci_map_err:
2706 be_unmap_pci_bars(adapter);
2707 return -ENOMEM;
2708 }
2709
2710
2711 static void be_ctrl_cleanup(struct be_adapter *adapter)
2712 {
2713 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2714
2715 be_unmap_pci_bars(adapter);
2716
2717 if (mem->va)
2718 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2719 mem->dma);
2720
2721 mem = &adapter->mc_cmd_mem;
2722 if (mem->va)
2723 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2724 mem->dma);
2725 }
2726
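/* Map the BARs and allocate the mailbox and multicast-config DMA
 * buffers; the mailbox is over-allocated by 16 bytes so both its
 * virtual and bus addresses can be aligned to a 16-byte boundary
 * before being handed to firmware.
 */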
2727 static int be_ctrl_init(struct be_adapter *adapter)
2728 {
2729 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2730 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2731 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2732 int status;
2733
2734 status = be_map_pci_bars(adapter);
2735 if (status)
2736 goto done;
2737
2738 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2739 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2740 mbox_mem_alloc->size,
2741 &mbox_mem_alloc->dma,
2742 GFP_KERNEL);
2743 if (!mbox_mem_alloc->va) {
2744 status = -ENOMEM;
2745 goto unmap_pci_bars;
2746 }
2747
2748 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2749 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2750 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2751 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2752
2753 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2754 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2755 mc_cmd_mem->size, &mc_cmd_mem->dma,
2756 GFP_KERNEL);
2757 if (mc_cmd_mem->va == NULL) {
2758 status = -ENOMEM;
2759 goto free_mbox;
2760 }
2761 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2762
2763 mutex_init(&adapter->mbox_lock);
2764 spin_lock_init(&adapter->mcc_lock);
2765 spin_lock_init(&adapter->mcc_cq_lock);
2766
2767 init_completion(&adapter->flash_compl);
2768 pci_save_state(adapter->pdev);
2769 return 0;
2770
2771 free_mbox:
2772 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2773 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2774
2775 unmap_pci_bars:
2776 be_unmap_pci_bars(adapter);
2777
2778 done:
2779 return status;
2780 }
2781
2782 static void be_stats_cleanup(struct be_adapter *adapter)
2783 {
2784 struct be_dma_mem *cmd = &adapter->stats_cmd;
2785
2786 if (cmd->va)
2787 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2788 cmd->va, cmd->dma);
2789 }
2790
2791 static int be_stats_init(struct be_adapter *adapter)
2792 {
2793 struct be_dma_mem *cmd = &adapter->stats_cmd;
2794
2795 cmd->size = sizeof(struct be_cmd_req_get_stats);
2796 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2797 GFP_KERNEL);
2798 if (cmd->va == NULL)
2799 return -1;
2800 memset(cmd->va, 0, cmd->size);
2801 return 0;
2802 }
2803
2804 static void __devexit be_remove(struct pci_dev *pdev)
2805 {
2806 struct be_adapter *adapter = pci_get_drvdata(pdev);
2807
2808 if (!adapter)
2809 return;
2810
2811 cancel_delayed_work_sync(&adapter->work);
2812
2813 unregister_netdev(adapter->netdev);
2814
2815 be_clear(adapter);
2816
2817 be_stats_cleanup(adapter);
2818
2819 be_ctrl_cleanup(adapter);
2820
2821 be_sriov_disable(adapter);
2822
2823 be_msix_disable(adapter);
2824
2825 pci_set_drvdata(pdev, NULL);
2826 pci_release_regions(pdev);
2827 pci_disable_device(pdev);
2828
2829 free_netdev(adapter->netdev);
2830 }
2831
2832 static int be_get_config(struct be_adapter *adapter)
2833 {
2834 int status;
2835 u8 mac[ETH_ALEN];
2836
2837 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2838 if (status)
2839 return status;
2840
2841 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2842 &adapter->function_mode, &adapter->function_caps);
2843 if (status)
2844 return status;
2845
2846 memset(mac, 0, ETH_ALEN);
2847
2848 if (be_physfn(adapter)) {
2849 status = be_cmd_mac_addr_query(adapter, mac,
2850 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2851
2852 if (status)
2853 return status;
2854
2855 if (!is_valid_ether_addr(mac))
2856 return -EADDRNOTAVAIL;
2857
2858 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2859 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2860 }
2861
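/* Bit 0x400 in function_mode appears to flag multi-channel (FLEX10)
 * operation, where this function gets only a quarter of the VLAN
 * filters; the magic number predates a named constant.
 */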
2862 if (adapter->function_mode & 0x400)
2863 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2864 else
2865 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2866
2867 status = be_cmd_get_cntl_attributes(adapter);
2868 if (status)
2869 return status;
2870
2871 be_cmd_check_native_mode(adapter);
2872 return 0;
2873 }
2874
2875 static int be_dev_family_check(struct be_adapter *adapter)
2876 {
2877 struct pci_dev *pdev = adapter->pdev;
2878 u32 sli_intf = 0, if_type;
2879
2880 switch (pdev->device) {
2881 case BE_DEVICE_ID1:
2882 case OC_DEVICE_ID1:
2883 adapter->generation = BE_GEN2;
2884 break;
2885 case BE_DEVICE_ID2:
2886 case OC_DEVICE_ID2:
2887 adapter->generation = BE_GEN3;
2888 break;
2889 case OC_DEVICE_ID3:
2890 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2891 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2892 SLI_INTF_IF_TYPE_SHIFT;
2893
2894 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2895 if_type != 0x02) {
2896 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2897 return -EINVAL;
2898 }
2899 if (num_vfs > 0) {
2900 dev_err(&pdev->dev, "VFs not supported\n");
2901 return -EINVAL;
2902 }
2903 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2904 SLI_INTF_FAMILY_SHIFT);
2905 adapter->generation = BE_GEN3;
2906 break;
2907 default:
2908 adapter->generation = 0;
2909 }
2910 return 0;
2911 }
2912
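/* Poll the SLIPORT status register for the ready bit, giving the
 * Lancer firmware up to 500 * 20ms = 10 seconds to come up.
 */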
2913 static int lancer_wait_ready(struct be_adapter *adapter)
2914 {
2915 #define SLIPORT_READY_TIMEOUT 500
2916 u32 sliport_status;
2917 int status = 0, i;
2918
2919 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2920 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2921 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2922 break;
2923
2924 msleep(20);
2925 }
2926
2927 if (i == SLIPORT_READY_TIMEOUT)
2928 status = -1;
2929
2930 return status;
2931 }
2932
2933 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2934 {
2935 int status;
2936 u32 sliport_status, err, reset_needed;
2937 status = lancer_wait_ready(adapter);
2938 if (!status) {
2939 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2940 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2941 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2942 if (err && reset_needed) {
2943 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2944 adapter->db + SLIPORT_CONTROL_OFFSET);
2945
2946 /* check adapter has corrected the error */
2947 status = lancer_wait_ready(adapter);
2948 sliport_status = ioread32(adapter->db +
2949 SLIPORT_STATUS_OFFSET);
2950 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2951 SLIPORT_STATUS_RN_MASK);
2952 if (status || sliport_status)
2953 status = -1;
2954 } else if (err || reset_needed) {
2955 status = -1;
2956 }
2957 }
2958 return status;
2959 }
2960
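/* PCI probe: bring up the device and BARs, pick a 64-bit DMA mask
 * (falling back to 32-bit), enable SR-IOV, sync with firmware state
 * (POST on the PF, then fw_init and a function reset), set up stats,
 * MSI-X and the queues, register the netdev and finally kick off the
 * periodic worker.
 */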
2961 static int __devinit be_probe(struct pci_dev *pdev,
2962 const struct pci_device_id *pdev_id)
2963 {
2964 int status = 0;
2965 struct be_adapter *adapter;
2966 struct net_device *netdev;
2967
2968 status = pci_enable_device(pdev);
2969 if (status)
2970 goto do_none;
2971
2972 status = pci_request_regions(pdev, DRV_NAME);
2973 if (status)
2974 goto disable_dev;
2975 pci_set_master(pdev);
2976
2977 netdev = alloc_etherdev(sizeof(struct be_adapter));
2978 if (netdev == NULL) {
2979 status = -ENOMEM;
2980 goto rel_reg;
2981 }
2982 adapter = netdev_priv(netdev);
2983 adapter->pdev = pdev;
2984 pci_set_drvdata(pdev, adapter);
2985
2986 status = be_dev_family_check(adapter);
2987 if (status)
2988 goto free_netdev;
2989
2990 adapter->netdev = netdev;
2991 SET_NETDEV_DEV(netdev, &pdev->dev);
2992
2993 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2994 if (!status) {
2995 netdev->features |= NETIF_F_HIGHDMA;
2996 } else {
2997 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2998 if (status) {
2999 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3000 goto free_netdev;
3001 }
3002 }
3003
3004 be_sriov_enable(adapter);
3005
3006 status = be_ctrl_init(adapter);
3007 if (status)
3008 goto free_netdev;
3009
3010 if (lancer_chip(adapter)) {
3011 status = lancer_test_and_set_rdy_state(adapter);
3012 if (status) {
3013 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3014 goto free_netdev;
3015 }
3016 }
3017
3018 /* sync up with fw's ready state */
3019 if (be_physfn(adapter)) {
3020 status = be_cmd_POST(adapter);
3021 if (status)
3022 goto ctrl_clean;
3023 }
3024
3025 /* tell fw we're ready to fire cmds */
3026 status = be_cmd_fw_init(adapter);
3027 if (status)
3028 goto ctrl_clean;
3029
3030 status = be_cmd_reset_function(adapter);
3031 if (status)
3032 goto ctrl_clean;
3033
3034 status = be_stats_init(adapter);
3035 if (status)
3036 goto ctrl_clean;
3037
3038 status = be_get_config(adapter);
3039 if (status)
3040 goto stats_clean;
3041
3042 be_msix_enable(adapter);
3043
3044 INIT_DELAYED_WORK(&adapter->work, be_worker);
3045
3046 status = be_setup(adapter);
3047 if (status)
3048 goto msix_disable;
3049
3050 be_netdev_init(netdev);
3051 status = register_netdev(netdev);
3052 if (status != 0)
3053 goto unsetup;
3054 netif_carrier_off(netdev);
3055
3056 if (be_physfn(adapter) && adapter->sriov_enabled) {
3057 status = be_vf_eth_addr_config(adapter);
3058 if (status)
3059 goto unreg_netdev;
3060 }
3061
3062 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3063 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3064 return 0;
3065
3066 unreg_netdev:
3067 unregister_netdev(netdev);
3068 unsetup:
3069 be_clear(adapter);
3070 msix_disable:
3071 be_msix_disable(adapter);
3072 stats_clean:
3073 be_stats_cleanup(adapter);
3074 ctrl_clean:
3075 be_ctrl_cleanup(adapter);
3076 free_netdev:
3077 be_sriov_disable(adapter);
3078 free_netdev(netdev);
3079 pci_set_drvdata(pdev, NULL);
3080 rel_reg:
3081 pci_release_regions(pdev);
3082 disable_dev:
3083 pci_disable_device(pdev);
3084 do_none:
3085 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3086 return status;
3087 }
3088
3089 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3090 {
3091 struct be_adapter *adapter = pci_get_drvdata(pdev);
3092 struct net_device *netdev = adapter->netdev;
3093
3094 cancel_delayed_work_sync(&adapter->work);
3095 if (adapter->wol)
3096 be_setup_wol(adapter, true);
3097
3098 netif_device_detach(netdev);
3099 if (netif_running(netdev)) {
3100 rtnl_lock();
3101 be_close(netdev);
3102 rtnl_unlock();
3103 }
3104 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3105 be_clear(adapter);
3106
3107 be_msix_disable(adapter);
3108 pci_save_state(pdev);
3109 pci_disable_device(pdev);
3110 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3111 return 0;
3112 }
3113
3114 static int be_resume(struct pci_dev *pdev)
3115 {
3116 int status = 0;
3117 struct be_adapter *adapter = pci_get_drvdata(pdev);
3118 struct net_device *netdev = adapter->netdev;
3119
3120 netif_device_detach(netdev);
3121
3122 status = pci_enable_device(pdev);
3123 if (status)
3124 return status;
3125
3126 pci_set_power_state(pdev, PCI_D0);
3127 pci_restore_state(pdev);
3128
3129 be_msix_enable(adapter);
3130 /* tell fw we're ready to fire cmds */
3131 status = be_cmd_fw_init(adapter);
3132 if (status)
3133 return status;
3134
3135 be_setup(adapter);
3136 if (netif_running(netdev)) {
3137 rtnl_lock();
3138 be_open(netdev);
3139 rtnl_unlock();
3140 }
3141 netif_device_attach(netdev);
3142
3143 if (adapter->wol)
3144 be_setup_wol(adapter, false);
3145
3146 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3147 return 0;
3148 }
3149
3150 /*
3151 * An FLR will stop BE from DMAing any data.
3152 */
3153 static void be_shutdown(struct pci_dev *pdev)
3154 {
3155 struct be_adapter *adapter = pci_get_drvdata(pdev);
3156
3157 if (!adapter)
3158 return;
3159
3160 cancel_delayed_work_sync(&adapter->work);
3161
3162 netif_device_detach(adapter->netdev);
3163
3164 be_cmd_reset_function(adapter);
3165
3166 if (adapter->wol)
3167 be_setup_wol(adapter, true);
3168
3169 pci_disable_device(pdev);
3170 }
3171
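/* EEH error handler: detach the netdev, close it if running and tear
 * down the data path; a permanent failure means disconnect, otherwise
 * the device is disabled and the PCI core is asked to reset the slot.
 */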
3172 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3173 pci_channel_state_t state)
3174 {
3175 struct be_adapter *adapter = pci_get_drvdata(pdev);
3176 struct net_device *netdev = adapter->netdev;
3177
3178 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3179
3180 adapter->eeh_err = true;
3181
3182 netif_device_detach(netdev);
3183
3184 if (netif_running(netdev)) {
3185 rtnl_lock();
3186 be_close(netdev);
3187 rtnl_unlock();
3188 }
3189 be_clear(adapter);
3190
3191 if (state == pci_channel_io_perm_failure)
3192 return PCI_ERS_RESULT_DISCONNECT;
3193
3194 pci_disable_device(pdev);
3195
3196 return PCI_ERS_RESULT_NEED_RESET;
3197 }
3198
3199 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3200 {
3201 struct be_adapter *adapter = pci_get_drvdata(pdev);
3202 int status;
3203
3204 dev_info(&adapter->pdev->dev, "EEH reset\n");
3205 adapter->eeh_err = false;
3206
3207 status = pci_enable_device(pdev);
3208 if (status)
3209 return PCI_ERS_RESULT_DISCONNECT;
3210
3211 pci_set_master(pdev);
3212 pci_set_power_state(pdev, PCI_D0);
3213 pci_restore_state(pdev);
3214
3215 /* Check if card is ok and fw is ready */
3216 status = be_cmd_POST(adapter);
3217 if (status)
3218 return PCI_ERS_RESULT_DISCONNECT;
3219
3220 return PCI_ERS_RESULT_RECOVERED;
3221 }
3222
3223 static void be_eeh_resume(struct pci_dev *pdev)
3224 {
3225 int status = 0;
3226 struct be_adapter *adapter = pci_get_drvdata(pdev);
3227 struct net_device *netdev = adapter->netdev;
3228
3229 dev_info(&adapter->pdev->dev, "EEH resume\n");
3230
3231 pci_save_state(pdev);
3232
3233 /* tell fw we're ready to fire cmds */
3234 status = be_cmd_fw_init(adapter);
3235 if (status)
3236 goto err;
3237
3238 status = be_setup(adapter);
3239 if (status)
3240 goto err;
3241
3242 if (netif_running(netdev)) {
3243 status = be_open(netdev);
3244 if (status)
3245 goto err;
3246 }
3247 netif_device_attach(netdev);
3248 return;
3249 err:
3250 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3251 }
3252
3253 static struct pci_error_handlers be_eeh_handlers = {
3254 .error_detected = be_eeh_err_detected,
3255 .slot_reset = be_eeh_reset,
3256 .resume = be_eeh_resume,
3257 };
3258
3259 static struct pci_driver be_driver = {
3260 .name = DRV_NAME,
3261 .id_table = be_dev_ids,
3262 .probe = be_probe,
3263 .remove = be_remove,
3264 .suspend = be_suspend,
3265 .resume = be_resume,
3266 .shutdown = be_shutdown,
3267 .err_handler = &be_eeh_handlers
3268 };
3269
3270 static int __init be_init_module(void)
3271 {
3272 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3273 rx_frag_size != 2048) {
3274 printk(KERN_WARNING DRV_NAME
3275 " : Module param rx_frag_size must be 2048/4096/8192."
3276 " Using 2048\n");
3277 rx_frag_size = 2048;
3278 }
3279
3280 if (num_vfs > 32) {
3281 printk(KERN_WARNING DRV_NAME
3282 " : Module param num_vfs must not be greater than 32."
3283 "Using 32\n");
3284 num_vfs = 32;
3285 }
3286
3287 return pci_register_driver(&be_driver);
3288 }
3289 module_init(be_init_module);
3290
3291 static void __exit be_exit_module(void)
3292 {
3293 pci_unregister_driver(&be_driver);
3294 }
3295 module_exit(be_exit_module);