be2net: add code to dump registers for debug
[deliverable/linux.git] / drivers/net/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
23 MODULE_DEVICE_TABLE(pci, be_dev_ids);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static unsigned int rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, uint, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
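/* A minimal usage sketch (hypothetical values): both parameters are
 * read-only module options set at load time, e.g.:
 *   modprobe be2net rx_frag_size=4096 num_vfs=4
 */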
34
35 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
36 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
38 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
39 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
40 { 0 }
41 };
43 /* UE Status Low CSR */
44 static char *ue_status_low_desc[] = {
45 "CEV",
46 "CTX",
47 "DBUF",
48 "ERX",
49 "Host",
50 "MPU",
51 "NDMA",
52 "PTC ",
53 "RDMA ",
54 "RXF ",
55 "RXIPS ",
56 "RXULP0 ",
57 "RXULP1 ",
58 "RXULP2 ",
59 "TIM ",
60 "TPOST ",
61 "TPRE ",
62 "TXIPS ",
63 "TXULP0 ",
64 "TXULP1 ",
65 "UC ",
66 "WDMA ",
67 "TXULP2 ",
68 "HOST1 ",
69 "P0_OB_LINK ",
70 "P1_OB_LINK ",
71 "HOST_GPIO ",
72 "MBOX ",
73 "AXGMAC0",
74 "AXGMAC1",
75 "JTAG",
76 "MPU_INTPEND"
77 };
78 /* UE Status High CSR */
79 static char *ue_status_hi_desc[] = {
80 "LPCMEMHOST",
81 "MGMT_MAC",
82 "PCS0ONLINE",
83 "MPU_IRAM",
84 "PCS1ONLINE",
85 "PCTL0",
86 "PCTL1",
87 "PMEM",
88 "RR",
89 "TXPB",
90 "RXPP",
91 "XAUI",
92 "TXP",
93 "ARM",
94 "IPC",
95 "HOST2",
96 "HOST3",
97 "HOST4",
98 "HOST5",
99 "HOST6",
100 "HOST7",
101 "HOST8",
102 "HOST9",
103 "NETC"
104 "Unknown",
105 "Unknown",
106 "Unknown",
107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown"
112 };
113
114 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
115 {
116 struct be_dma_mem *mem = &q->dma_mem;
117 if (mem->va)
118 pci_free_consistent(adapter->pdev, mem->size,
119 mem->va, mem->dma);
120 }
121
122 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
123 u16 len, u16 entry_size)
124 {
125 struct be_dma_mem *mem = &q->dma_mem;
126
127 memset(q, 0, sizeof(*q));
128 q->len = len;
129 q->entry_size = entry_size;
130 mem->size = len * entry_size;
131 mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
132 if (!mem->va)
133 return -1;
134 memset(mem->va, 0, mem->size);
135 return 0;
136 }
137
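/* Toggle host interrupt delivery by flipping the hostintr bit in the
 * membar control register; the current state is read first so redundant
 * writes are skipped, and nothing is done while an EEH error is pending.
 */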
138 static void be_intr_set(struct be_adapter *adapter, bool enable)
139 {
140 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
141 u32 reg = ioread32(addr);
142 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
143
144 if (adapter->eeh_err)
145 return;
146
147 if (!enabled && enable)
148 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
149 else if (enabled && !enable)
150 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
151 else
152 return;
153
154 iowrite32(reg, addr);
155 }
156
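/* Doorbell helpers: the low bits of the doorbell word select the ring
 * and the remaining fields carry the number of entries posted/popped.
 * The wmb() orders descriptor writes before the doorbell write.
 */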
157 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
158 {
159 u32 val = 0;
160 val |= qid & DB_RQ_RING_ID_MASK;
161 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
162
163 wmb();
164 iowrite32(val, adapter->db + DB_RQ_OFFSET);
165 }
166
167 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
168 {
169 u32 val = 0;
170 val |= qid & DB_TXULP_RING_ID_MASK;
171 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
172
173 wmb();
174 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
175 }
176
177 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
178 bool arm, bool clear_int, u16 num_popped)
179 {
180 u32 val = 0;
181 val |= qid & DB_EQ_RING_ID_MASK;
182
183 if (adapter->eeh_err)
184 return;
185
186 if (arm)
187 val |= 1 << DB_EQ_REARM_SHIFT;
188 if (clear_int)
189 val |= 1 << DB_EQ_CLR_SHIFT;
190 val |= 1 << DB_EQ_EVNT_SHIFT;
191 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
192 iowrite32(val, adapter->db + DB_EQ_OFFSET);
193 }
194
195 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
196 {
197 u32 val = 0;
198 val |= qid & DB_CQ_RING_ID_MASK;
199
200 if (adapter->eeh_err)
201 return;
202
203 if (arm)
204 val |= 1 << DB_CQ_REARM_SHIFT;
205 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
206 iowrite32(val, adapter->db + DB_CQ_OFFSET);
207 }
208
209 static int be_mac_addr_set(struct net_device *netdev, void *p)
210 {
211 struct be_adapter *adapter = netdev_priv(netdev);
212 struct sockaddr *addr = p;
213 int status = 0;
214
215 if (!is_valid_ether_addr(addr->sa_data))
216 return -EADDRNOTAVAIL;
217
218 /* MAC addr configuration will be done in hardware for VFs
219 * by their corresponding PFs. Just copy to netdev addr here
220 */
221 if (!be_physfn(adapter))
222 goto netdev_addr;
223
224 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
225 if (status)
226 return status;
227
228 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
229 adapter->if_handle, &adapter->pmac_id);
230 netdev_addr:
231 if (!status)
232 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
233
234 return status;
235 }
236
237 void netdev_stats_update(struct be_adapter *adapter)
238 {
239 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
240 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
241 struct be_port_rxf_stats *port_stats =
242 &rxf_stats->port[adapter->port_num];
243 struct net_device_stats *dev_stats = &adapter->netdev->stats;
244 struct be_erx_stats *erx_stats = &hw_stats->erx;
245
246 dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
247 dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
248 dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
249 dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
250
251 /* bad pkts received */
252 dev_stats->rx_errors = port_stats->rx_crc_errors +
253 port_stats->rx_alignment_symbol_errors +
254 port_stats->rx_in_range_errors +
255 port_stats->rx_out_range_errors +
256 port_stats->rx_frame_too_long +
257 port_stats->rx_dropped_too_small +
258 port_stats->rx_dropped_too_short +
259 port_stats->rx_dropped_header_too_small +
260 port_stats->rx_dropped_tcp_length +
261 port_stats->rx_dropped_runt +
262 port_stats->rx_tcp_checksum_errs +
263 port_stats->rx_ip_checksum_errs +
264 port_stats->rx_udp_checksum_errs;
265
266 /* no space in linux buffers: best possible approximation */
267 dev_stats->rx_dropped =
268 erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];
269
270 /* detailed rx errors */
271 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
272 port_stats->rx_out_range_errors +
273 port_stats->rx_frame_too_long;
274
275 /* receive ring buffer overflow */
276 dev_stats->rx_over_errors = 0;
277
278 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
279
280 /* frame alignment errors */
281 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
282
283 /* receiver fifo overrun */
284 /* drops_no_pbuf is not per i/f, it's per BE card */
285 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
286 port_stats->rx_input_fifo_overflow +
287 rxf_stats->rx_drops_no_pbuf;
288 /* receiver missed packets */
289 dev_stats->rx_missed_errors = 0;
290
291 /* packet transmit problems */
292 dev_stats->tx_errors = 0;
293
294 /* no space available in linux */
295 dev_stats->tx_dropped = 0;
296
297 dev_stats->multicast = port_stats->rx_multicast_frames;
298 dev_stats->collisions = 0;
299
300 /* detailed tx_errors */
301 dev_stats->tx_aborted_errors = 0;
302 dev_stats->tx_carrier_errors = 0;
303 dev_stats->tx_fifo_errors = 0;
304 dev_stats->tx_heartbeat_errors = 0;
305 dev_stats->tx_window_errors = 0;
306 }
307
308 void be_link_status_update(struct be_adapter *adapter, bool link_up)
309 {
310 struct net_device *netdev = adapter->netdev;
311
312 /* If link came up or went down */
313 if (adapter->link_up != link_up) {
314 adapter->link_speed = -1;
315 if (link_up) {
316 netif_start_queue(netdev);
317 netif_carrier_on(netdev);
318 printk(KERN_INFO "%s: Link up\n", netdev->name);
319 } else {
320 netif_stop_queue(netdev);
321 netif_carrier_off(netdev);
322 printk(KERN_INFO "%s: Link down\n", netdev->name);
323 }
324 adapter->link_up = link_up;
325 }
326 }
327
328 /* Update the EQ delay in BE based on the RX frags consumed / sec */
329 static void be_rx_eqd_update(struct be_adapter *adapter)
330 {
331 struct be_eq_obj *rx_eq = &adapter->rx_eq;
332 struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
333 ulong now = jiffies;
334 u32 eqd;
335
336 if (!rx_eq->enable_aic)
337 return;
338
339 /* Wrapped around */
340 if (time_before(now, stats->rx_fps_jiffies)) {
341 stats->rx_fps_jiffies = now;
342 return;
343 }
344
345 /* Update once a second */
346 if ((now - stats->rx_fps_jiffies) < HZ)
347 return;
348
349 stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
350 ((now - stats->rx_fps_jiffies) / HZ);
351
352 stats->rx_fps_jiffies = now;
353 stats->be_prev_rx_frags = stats->be_rx_frags;
354 eqd = stats->be_rx_fps / 110000;
355 eqd = eqd << 3;
356 if (eqd > rx_eq->max_eqd)
357 eqd = rx_eq->max_eqd;
358 if (eqd < rx_eq->min_eqd)
359 eqd = rx_eq->min_eqd;
360 if (eqd < 10)
361 eqd = 0;
362 if (eqd != rx_eq->cur_eqd)
363 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
364
365 rx_eq->cur_eqd = eqd;
366 }
367
368 static struct net_device_stats *be_get_stats(struct net_device *dev)
369 {
370 return &dev->stats;
371 }
372
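/* Converts a byte count accumulated over 'ticks' jiffies to Mbits/sec.
 * For example, 250,000,000 bytes over 2*HZ ticks works out to
 * 125,000,000 bytes/sec = 1,000,000,000 bits/sec = 1000 Mbps.
 */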
373 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
374 {
375 u64 rate = bytes;
376
377 do_div(rate, ticks / HZ);
378 rate <<= 3; /* bytes/sec -> bits/sec */
379 do_div(rate, 1000000ul); /* bits/sec -> Mbits/sec */
380
381 return rate;
382 }
383
384 static void be_tx_rate_update(struct be_adapter *adapter)
385 {
386 struct be_drvr_stats *stats = drvr_stats(adapter);
387 ulong now = jiffies;
388
389 /* Wrapped around? */
390 if (time_before(now, stats->be_tx_jiffies)) {
391 stats->be_tx_jiffies = now;
392 return;
393 }
394
395 /* Update tx rate once in two seconds */
396 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
397 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
398 - stats->be_tx_bytes_prev,
399 now - stats->be_tx_jiffies);
400 stats->be_tx_jiffies = now;
401 stats->be_tx_bytes_prev = stats->be_tx_bytes;
402 }
403 }
404
405 static void be_tx_stats_update(struct be_adapter *adapter,
406 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
407 {
408 struct be_drvr_stats *stats = drvr_stats(adapter);
409 stats->be_tx_reqs++;
410 stats->be_tx_wrbs += wrb_cnt;
411 stats->be_tx_bytes += copied;
412 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
413 if (stopped)
414 stats->be_tx_stops++;
415 }
416
417 /* Determine number of WRB entries needed to xmit data in an skb */
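/* One WRB covers the linear part (if any) and one more is used per
 * page fragment; a header WRB is always added, and a dummy WRB pads
 * an odd total to an even count.
 */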
418 static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
419 {
420 int cnt = (skb->len > skb->data_len);
421
422 cnt += skb_shinfo(skb)->nr_frags;
423
424 /* to account for hdr wrb */
425 cnt++;
426 if (cnt & 1) {
427 /* add a dummy to make it an even num */
428 cnt++;
429 *dummy = true;
430 } else
431 *dummy = false;
432 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
433 return cnt;
434 }
435
436 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
437 {
438 wrb->frag_pa_hi = upper_32_bits(addr);
439 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
440 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
441 }
442
443 static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
444 bool vlan, u32 wrb_cnt, u32 len)
445 {
446 memset(hdr, 0, sizeof(*hdr));
447
448 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
449
450 if (skb_is_gso(skb)) {
451 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
452 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
453 hdr, skb_shinfo(skb)->gso_size);
454 if (skb_is_gso_v6(skb))
455 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
456 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
457 if (is_tcp_pkt(skb))
458 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
459 else if (is_udp_pkt(skb))
460 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
461 }
462
463 if (vlan && vlan_tx_tag_present(skb)) {
464 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
465 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
466 hdr, vlan_tx_tag_get(skb));
467 }
468
469 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
470 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
471 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
472 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
473 }
474
475 static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
476 bool unmap_single)
477 {
478 dma_addr_t dma;
479
480 be_dws_le_to_cpu(wrb, sizeof(*wrb));
481
482 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
483 if (wrb->frag_len) {
484 if (unmap_single)
485 pci_unmap_single(pdev, dma, wrb->frag_len,
486 PCI_DMA_TODEVICE);
487 else
488 pci_unmap_page(pdev, dma, wrb->frag_len,
489 PCI_DMA_TODEVICE);
490 }
491 }
492
493 static int make_tx_wrbs(struct be_adapter *adapter,
494 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
495 {
496 dma_addr_t busaddr;
497 int i, copied = 0;
498 struct pci_dev *pdev = adapter->pdev;
499 struct sk_buff *first_skb = skb;
500 struct be_queue_info *txq = &adapter->tx_obj.q;
501 struct be_eth_wrb *wrb;
502 struct be_eth_hdr_wrb *hdr;
503 bool map_single = false;
504 u16 map_head;
505
506 hdr = queue_head_node(txq);
507 queue_head_inc(txq);
508 map_head = txq->head;
509
510 if (skb->len > skb->data_len) {
511 int len = skb_headlen(skb);
512 busaddr = pci_map_single(pdev, skb->data, len,
513 PCI_DMA_TODEVICE);
514 if (pci_dma_mapping_error(pdev, busaddr))
515 goto dma_err;
516 map_single = true;
517 wrb = queue_head_node(txq);
518 wrb_fill(wrb, busaddr, len);
519 be_dws_cpu_to_le(wrb, sizeof(*wrb));
520 queue_head_inc(txq);
521 copied += len;
522 }
523
524 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
525 struct skb_frag_struct *frag =
526 &skb_shinfo(skb)->frags[i];
527 busaddr = pci_map_page(pdev, frag->page,
528 frag->page_offset,
529 frag->size, PCI_DMA_TODEVICE);
530 if (pci_dma_mapping_error(pdev, busaddr))
531 goto dma_err;
532 wrb = queue_head_node(txq);
533 wrb_fill(wrb, busaddr, frag->size);
534 be_dws_cpu_to_le(wrb, sizeof(*wrb));
535 queue_head_inc(txq);
536 copied += frag->size;
537 }
538
539 if (dummy_wrb) {
540 wrb = queue_head_node(txq);
541 wrb_fill(wrb, 0, 0);
542 be_dws_cpu_to_le(wrb, sizeof(*wrb));
543 queue_head_inc(txq);
544 }
545
546 wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
547 wrb_cnt, copied);
548 be_dws_cpu_to_le(hdr, sizeof(*hdr));
549
550 return copied;
551 dma_err:
552 txq->head = map_head;
553 while (copied) {
554 wrb = queue_head_node(txq);
555 unmap_tx_frag(pdev, wrb, map_single);
556 map_single = false;
557 copied -= wrb->frag_len;
558 queue_head_inc(txq);
559 }
560 return 0;
561 }
562
563 static netdev_tx_t be_xmit(struct sk_buff *skb,
564 struct net_device *netdev)
565 {
566 struct be_adapter *adapter = netdev_priv(netdev);
567 struct be_tx_obj *tx_obj = &adapter->tx_obj;
568 struct be_queue_info *txq = &tx_obj->q;
569 u32 wrb_cnt = 0, copied = 0;
570 u32 start = txq->head;
571 bool dummy_wrb, stopped = false;
572
573 wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
574
575 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
576 if (copied) {
577 /* record the sent skb in the sent_skb table */
578 BUG_ON(tx_obj->sent_skb_list[start]);
579 tx_obj->sent_skb_list[start] = skb;
580
581 /* Ensure txq has space for the next skb; Else stop the queue
582 * *BEFORE* ringing the tx doorbell, so that we serialize the
583 * tx compls of the current transmit which'll wake up the queue
584 */
585 atomic_add(wrb_cnt, &txq->used);
586 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
587 txq->len) {
588 netif_stop_queue(netdev);
589 stopped = true;
590 }
591
592 be_txq_notify(adapter, txq->id, wrb_cnt);
593
594 be_tx_stats_update(adapter, wrb_cnt, copied,
595 skb_shinfo(skb)->gso_segs, stopped);
596 } else {
597 txq->head = start;
598 dev_kfree_skb_any(skb);
599 }
600 return NETDEV_TX_OK;
601 }
602
603 static int be_change_mtu(struct net_device *netdev, int new_mtu)
604 {
605 struct be_adapter *adapter = netdev_priv(netdev);
606 if (new_mtu < BE_MIN_MTU ||
607 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
608 (ETH_HLEN + ETH_FCS_LEN))) {
609 dev_info(&adapter->pdev->dev,
610 "MTU must be between %d and %d bytes\n",
611 BE_MIN_MTU,
612 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
613 return -EINVAL;
614 }
615 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
616 netdev->mtu, new_mtu);
617 netdev->mtu = new_mtu;
618 return 0;
619 }
620
621 /*
622 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
623 * If the user configures more, place BE in vlan promiscuous mode.
624 */
625 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
626 {
627 u16 vtag[BE_NUM_VLANS_SUPPORTED];
628 u16 ntags = 0, i;
629 int status = 0;
630 u32 if_handle;
631
632 if (vf) {
633 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
634 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
635 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
636 }
637
638 if (adapter->vlans_added <= adapter->max_vlans) {
639 /* Construct VLAN Table to give to HW */
640 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
641 if (adapter->vlan_tag[i]) {
642 vtag[ntags] = cpu_to_le16(i);
643 ntags++;
644 }
645 }
646 status = be_cmd_vlan_config(adapter, adapter->if_handle,
647 vtag, ntags, 1, 0);
648 } else {
649 status = be_cmd_vlan_config(adapter, adapter->if_handle,
650 NULL, 0, 1, 1);
651 }
652
653 return status;
654 }
655
656 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
657 {
658 struct be_adapter *adapter = netdev_priv(netdev);
659 struct be_eq_obj *rx_eq = &adapter->rx_eq;
660 struct be_eq_obj *tx_eq = &adapter->tx_eq;
661
662 be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
663 be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
664 adapter->vlan_grp = grp;
665 be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
666 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
667 }
668
669 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
670 {
671 struct be_adapter *adapter = netdev_priv(netdev);
672
673 adapter->vlans_added++;
674 if (!be_physfn(adapter))
675 return;
676
677 adapter->vlan_tag[vid] = 1;
678 if (adapter->vlans_added <= (adapter->max_vlans + 1))
679 be_vid_config(adapter, false, 0);
680 }
681
682 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
683 {
684 struct be_adapter *adapter = netdev_priv(netdev);
685
686 adapter->vlans_added--;
687 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
688
689 if (!be_physfn(adapter))
690 return;
691
692 adapter->vlan_tag[vid] = 0;
693 if (adapter->vlans_added <= adapter->max_vlans)
694 be_vid_config(adapter, false, 0);
695 }
696
697 static void be_set_multicast_list(struct net_device *netdev)
698 {
699 struct be_adapter *adapter = netdev_priv(netdev);
700
701 if (netdev->flags & IFF_PROMISC) {
702 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
703 adapter->promiscuous = true;
704 goto done;
705 }
706
707 /* BE was previously in promiscuous mode; disable it */
708 if (adapter->promiscuous) {
709 adapter->promiscuous = false;
710 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
711 }
712
713 /* Enable multicast promisc if num configured exceeds what we support */
714 if (netdev->flags & IFF_ALLMULTI ||
715 netdev_mc_count(netdev) > BE_MAX_MC) {
716 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
717 &adapter->mc_cmd_mem);
718 goto done;
719 }
720
721 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
722 &adapter->mc_cmd_mem);
723 done:
724 return;
725 }
726
727 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
728 {
729 struct be_adapter *adapter = netdev_priv(netdev);
730 int status;
731
732 if (!adapter->sriov_enabled)
733 return -EPERM;
734
735 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
736 return -EINVAL;
737
738 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
739 status = be_cmd_pmac_del(adapter,
740 adapter->vf_cfg[vf].vf_if_handle,
741 adapter->vf_cfg[vf].vf_pmac_id);
742
743 status = be_cmd_pmac_add(adapter, mac,
744 adapter->vf_cfg[vf].vf_if_handle,
745 &adapter->vf_cfg[vf].vf_pmac_id);
746
747 if (status)
748 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
749 mac, vf);
750 else
751 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
752
753 return status;
754 }
755
756 static int be_get_vf_config(struct net_device *netdev, int vf,
757 struct ifla_vf_info *vi)
758 {
759 struct be_adapter *adapter = netdev_priv(netdev);
760
761 if (!adapter->sriov_enabled)
762 return -EPERM;
763
764 if (vf >= num_vfs)
765 return -EINVAL;
766
767 vi->vf = vf;
768 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
769 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
770 vi->qos = 0;
771 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
772
773 return 0;
774 }
775
776 static int be_set_vf_vlan(struct net_device *netdev,
777 int vf, u16 vlan, u8 qos)
778 {
779 struct be_adapter *adapter = netdev_priv(netdev);
780 int status = 0;
781
782 if (!adapter->sriov_enabled)
783 return -EPERM;
784
785 if ((vf >= num_vfs) || (vlan > 4095))
786 return -EINVAL;
787
788 if (vlan) {
789 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
790 adapter->vlans_added++;
791 } else {
792 adapter->vf_cfg[vf].vf_vlan_tag = 0;
793 adapter->vlans_added--;
794 }
795
796 status = be_vid_config(adapter, true, vf);
797
798 if (status)
799 dev_info(&adapter->pdev->dev,
800 "VLAN %d config on VF %d failed\n", vlan, vf);
801 return status;
802 }
803
804 static int be_set_vf_tx_rate(struct net_device *netdev,
805 int vf, int rate)
806 {
807 struct be_adapter *adapter = netdev_priv(netdev);
808 int status = 0;
809
810 if (!adapter->sriov_enabled)
811 return -EPERM;
812
813 if ((vf >= num_vfs) || (rate < 0))
814 return -EINVAL;
815
816 if (rate > 10000)
817 rate = 10000;
818
819 adapter->vf_cfg[vf].vf_tx_rate = rate;
820 status = be_cmd_set_qos(adapter, rate / 10, vf);
821
822 if (status)
823 dev_info(&adapter->pdev->dev,
824 "tx rate %d on VF %d failed\n", rate, vf);
825 return status;
826 }
827
828 static void be_rx_rate_update(struct be_adapter *adapter)
829 {
830 struct be_drvr_stats *stats = drvr_stats(adapter);
831 ulong now = jiffies;
832
833 /* Wrapped around */
834 if (time_before(now, stats->be_rx_jiffies)) {
835 stats->be_rx_jiffies = now;
836 return;
837 }
838
839 /* Update the rate once in two seconds */
840 if ((now - stats->be_rx_jiffies) < 2 * HZ)
841 return;
842
843 stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
844 - stats->be_rx_bytes_prev,
845 now - stats->be_rx_jiffies);
846 stats->be_rx_jiffies = now;
847 stats->be_rx_bytes_prev = stats->be_rx_bytes;
848 }
849
850 static void be_rx_stats_update(struct be_adapter *adapter,
851 u32 pktsize, u16 numfrags)
852 {
853 struct be_drvr_stats *stats = drvr_stats(adapter);
854
855 stats->be_rx_compl++;
856 stats->be_rx_frags += numfrags;
857 stats->be_rx_bytes += pktsize;
858 stats->be_rx_pkts++;
859 }
860
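/* Returns true when the host must verify the checksum itself: rx csum
 * offload disabled, or the completion does not report valid L4 and IP
 * checksums for an IP TCP/UDP frame.
 */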
861 static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
862 {
863 u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
864
865 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
866 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
867 ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
868 if (ip_version) {
869 tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
870 udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
871 }
872 ipv6_chk = (ip_version && (tcpf || udpf));
873
874 return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
875 }
876
877 static struct be_rx_page_info *
878 get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
879 {
880 struct be_rx_page_info *rx_page_info;
881 struct be_queue_info *rxq = &adapter->rx_obj.q;
882
883 rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
884 BUG_ON(!rx_page_info->page);
885
886 if (rx_page_info->last_page_user) {
887 pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
888 adapter->big_page_size, PCI_DMA_FROMDEVICE);
889 rx_page_info->last_page_user = false;
890 }
891
892 atomic_dec(&rxq->used);
893 return rx_page_info;
894 }
895
896 /* Throw away the data in the Rx completion */
897 static void be_rx_compl_discard(struct be_adapter *adapter,
898 struct be_eth_rx_compl *rxcp)
899 {
900 struct be_queue_info *rxq = &adapter->rx_obj.q;
901 struct be_rx_page_info *page_info;
902 u16 rxq_idx, i, num_rcvd;
903
904 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
905 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
906
907 for (i = 0; i < num_rcvd; i++) {
908 page_info = get_rx_page_info(adapter, rxq_idx);
909 put_page(page_info->page);
910 memset(page_info, 0, sizeof(*page_info));
911 index_inc(&rxq_idx, rxq->len);
912 }
913 }
914
915 /*
916 * skb_fill_rx_data forms a complete skb for an ether frame
917 * indicated by rxcp.
918 */
919 static void skb_fill_rx_data(struct be_adapter *adapter,
920 struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
921 u16 num_rcvd)
922 {
923 struct be_queue_info *rxq = &adapter->rx_obj.q;
924 struct be_rx_page_info *page_info;
925 u16 rxq_idx, i, j;
926 u32 pktsize, hdr_len, curr_frag_len, size;
927 u8 *start;
928
929 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
930 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
931
932 page_info = get_rx_page_info(adapter, rxq_idx);
933
934 start = page_address(page_info->page) + page_info->page_offset;
935 prefetch(start);
936
937 /* Copy data in the first descriptor of this completion */
938 curr_frag_len = min(pktsize, rx_frag_size);
939
940 /* Copy the header portion into skb_data */
941 hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
942 memcpy(skb->data, start, hdr_len);
943 skb->len = curr_frag_len;
944 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
945 /* Complete packet has now been moved to data */
946 put_page(page_info->page);
947 skb->data_len = 0;
948 skb->tail += curr_frag_len;
949 } else {
950 skb_shinfo(skb)->nr_frags = 1;
951 skb_shinfo(skb)->frags[0].page = page_info->page;
952 skb_shinfo(skb)->frags[0].page_offset =
953 page_info->page_offset + hdr_len;
954 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
955 skb->data_len = curr_frag_len - hdr_len;
956 skb->tail += hdr_len;
957 }
958 page_info->page = NULL;
959
960 if (pktsize <= rx_frag_size) {
961 BUG_ON(num_rcvd != 1);
962 goto done;
963 }
964
965 /* More frags present for this completion */
966 size = pktsize;
967 for (i = 1, j = 0; i < num_rcvd; i++) {
968 size -= curr_frag_len;
969 index_inc(&rxq_idx, rxq->len);
970 page_info = get_rx_page_info(adapter, rxq_idx);
971
972 curr_frag_len = min(size, rx_frag_size);
973
974 /* Coalesce all frags from the same physical page in one slot */
975 if (page_info->page_offset == 0) {
976 /* Fresh page */
977 j++;
978 skb_shinfo(skb)->frags[j].page = page_info->page;
979 skb_shinfo(skb)->frags[j].page_offset =
980 page_info->page_offset;
981 skb_shinfo(skb)->frags[j].size = 0;
982 skb_shinfo(skb)->nr_frags++;
983 } else {
984 put_page(page_info->page);
985 }
986
987 skb_shinfo(skb)->frags[j].size += curr_frag_len;
988 skb->len += curr_frag_len;
989 skb->data_len += curr_frag_len;
990
991 page_info->page = NULL;
992 }
993 BUG_ON(j > MAX_SKB_FRAGS);
994
995 done:
996 be_rx_stats_update(adapter, pktsize, num_rcvd);
997 }
998
999 /* Process the RX completion indicated by rxcp when GRO is disabled */
1000 static void be_rx_compl_process(struct be_adapter *adapter,
1001 struct be_eth_rx_compl *rxcp)
1002 {
1003 struct sk_buff *skb;
1004 u32 vlanf, vid;
1005 u16 num_rcvd;
1006 u8 vtm;
1007
1008 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1009 /* Is it a flush compl that has no data */
1010 if (unlikely(num_rcvd == 0))
1011 return;
1012
1013 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
1014 if (unlikely(!skb)) {
1015 if (net_ratelimit())
1016 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1017 be_rx_compl_discard(adapter, rxcp);
1018 return;
1019 }
1020
1021 skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);
1022
1023 if (do_pkt_csum(rxcp, adapter->rx_csum))
1024 skb->ip_summed = CHECKSUM_NONE;
1025 else
1026 skb->ip_summed = CHECKSUM_UNNECESSARY;
1027
1028 skb->truesize = skb->len + sizeof(struct sk_buff);
1029 skb->protocol = eth_type_trans(skb, adapter->netdev);
1030
1031 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1032 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1033
1034 /* vlanf could be wrongly set in some cards.
1035 * ignore if vtm is not set */
1036 if ((adapter->function_mode & 0x400) && !vtm)
1037 vlanf = 0;
1038
1039 if (unlikely(vlanf)) {
1040 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1041 kfree_skb(skb);
1042 return;
1043 }
1044 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1045 vid = swab16(vid);
1046 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1047 } else {
1048 netif_receive_skb(skb);
1049 }
1050 }
1051
1052 /* Process the RX completion indicated by rxcp when GRO is enabled */
1053 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1054 struct be_eth_rx_compl *rxcp)
1055 {
1056 struct be_rx_page_info *page_info;
1057 struct sk_buff *skb = NULL;
1058 struct be_queue_info *rxq = &adapter->rx_obj.q;
1059 struct be_eq_obj *eq_obj = &adapter->rx_eq;
1060 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
1061 u16 i, rxq_idx = 0, vid, j;
1062 u8 vtm;
1063
1064 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1065 /* Is it a flush compl that has no data */
1066 if (unlikely(num_rcvd == 0))
1067 return;
1068
1069 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1070 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1071 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
1072 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1073
1074 /* vlanf could be wrongly set in some cards.
1075 * ignore if vtm is not set */
1076 if ((adapter->function_mode & 0x400) && !vtm)
1077 vlanf = 0;
1078
1079 skb = napi_get_frags(&eq_obj->napi);
1080 if (!skb) {
1081 be_rx_compl_discard(adapter, rxcp);
1082 return;
1083 }
1084
1085 remaining = pkt_size;
1086 for (i = 0, j = -1; i < num_rcvd; i++) {
1087 page_info = get_rx_page_info(adapter, rxq_idx);
1088
1089 curr_frag_len = min(remaining, rx_frag_size);
1090
1091 /* Coalesce all frags from the same physical page in one slot */
1092 if (i == 0 || page_info->page_offset == 0) {
1093 /* First frag or Fresh page */
1094 j++;
1095 skb_shinfo(skb)->frags[j].page = page_info->page;
1096 skb_shinfo(skb)->frags[j].page_offset =
1097 page_info->page_offset;
1098 skb_shinfo(skb)->frags[j].size = 0;
1099 } else {
1100 put_page(page_info->page);
1101 }
1102 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1103
1104 remaining -= curr_frag_len;
1105 index_inc(&rxq_idx, rxq->len);
1106 memset(page_info, 0, sizeof(*page_info));
1107 }
1108 BUG_ON(j > MAX_SKB_FRAGS);
1109
1110 skb_shinfo(skb)->nr_frags = j + 1;
1111 skb->len = pkt_size;
1112 skb->data_len = pkt_size;
1113 skb->truesize += pkt_size;
1114 skb->ip_summed = CHECKSUM_UNNECESSARY;
1115
1116 if (likely(!vlanf)) {
1117 napi_gro_frags(&eq_obj->napi);
1118 } else {
1119 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1120 vid = swab16(vid);
1121
1122 if (!adapter->vlan_grp || adapter->vlans_added == 0)
1123 return;
1124
1125 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
1126 }
1127
1128 be_rx_stats_update(adapter, pkt_size, num_rcvd);
1129 }
1130
1131 static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
1132 {
1133 struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);
1134
1135 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
1136 return NULL;
1137
1138 rmb();
1139 be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
1140
1141 queue_tail_inc(&adapter->rx_obj.cq);
1142 return rxcp;
1143 }
1144
1145 /* To reset the valid bit, we need to reset the whole word as
1146 * when walking the queue the valid entries are little-endian
1147 * and invalid entries are host endian
1148 */
1149 static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1150 {
1151 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1152 }
1153
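/* GFP_ATOMIC page allocation for the rx path; __GFP_COMP makes
 * multi-order pages compound so the per-frag get_page()/put_page()
 * refcounting in be_post_rx_frags() works on the head page.
 */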
1154 static inline struct page *be_alloc_pages(u32 size)
1155 {
1156 gfp_t alloc_flags = GFP_ATOMIC;
1157 u32 order = get_order(size);
1158 if (order > 0)
1159 alloc_flags |= __GFP_COMP;
1160 return alloc_pages(alloc_flags, order);
1161 }
1162
1163 /*
1164 * Allocate a page, split it to fragments of size rx_frag_size and post as
1165 * receive buffers to BE
1166 */
1167 static void be_post_rx_frags(struct be_adapter *adapter)
1168 {
1169 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
1170 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1171 struct be_queue_info *rxq = &adapter->rx_obj.q;
1172 struct page *pagep = NULL;
1173 struct be_eth_rx_d *rxd;
1174 u64 page_dmaaddr = 0, frag_dmaaddr;
1175 u32 posted, page_offset = 0;
1176
1177 page_info = &page_info_tbl[rxq->head];
1178 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1179 if (!pagep) {
1180 pagep = be_alloc_pages(adapter->big_page_size);
1181 if (unlikely(!pagep)) {
1182 drvr_stats(adapter)->be_ethrx_post_fail++;
1183 break;
1184 }
1185 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
1186 adapter->big_page_size,
1187 PCI_DMA_FROMDEVICE);
1188 page_info->page_offset = 0;
1189 } else {
1190 get_page(pagep);
1191 page_info->page_offset = page_offset + rx_frag_size;
1192 }
1193 page_offset = page_info->page_offset;
1194 page_info->page = pagep;
1195 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1196 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1197
1198 rxd = queue_head_node(rxq);
1199 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1200 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1201
1202 /* Any space left in the current big page for another frag? */
1203 if ((page_offset + rx_frag_size + rx_frag_size) >
1204 adapter->big_page_size) {
1205 pagep = NULL;
1206 page_info->last_page_user = true;
1207 }
1208
1209 prev_page_info = page_info;
1210 queue_head_inc(rxq);
1211 page_info = &page_info_tbl[rxq->head];
1212 }
1213 if (pagep)
1214 prev_page_info->last_page_user = true;
1215
1216 if (posted) {
1217 atomic_add(posted, &rxq->used);
1218 be_rxq_notify(adapter, rxq->id, posted);
1219 } else if (atomic_read(&rxq->used) == 0) {
1220 /* Let be_worker replenish when memory is available */
1221 adapter->rx_post_starved = true;
1222 }
1223 }
1224
1225 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1226 {
1227 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1228
1229 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1230 return NULL;
1231
1232 rmb();
1233 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1234
1235 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1236
1237 queue_tail_inc(tx_cq);
1238 return txcp;
1239 }
1240
1241 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1242 {
1243 struct be_queue_info *txq = &adapter->tx_obj.q;
1244 struct be_eth_wrb *wrb;
1245 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1246 struct sk_buff *sent_skb;
1247 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1248 bool unmap_skb_hdr = true;
1249
1250 sent_skb = sent_skbs[txq->tail];
1251 BUG_ON(!sent_skb);
1252 sent_skbs[txq->tail] = NULL;
1253
1254 /* skip header wrb */
1255 queue_tail_inc(txq);
1256
1257 do {
1258 cur_index = txq->tail;
1259 wrb = queue_tail_node(txq);
1260 unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
1261 skb_headlen(sent_skb)));
1262 unmap_skb_hdr = false;
1263
1264 num_wrbs++;
1265 queue_tail_inc(txq);
1266 } while (cur_index != last_index);
1267
1268 atomic_sub(num_wrbs, &txq->used);
1269
1270 kfree_skb(sent_skb);
1271 }
1272
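/* Returns the next valid event queue entry, or NULL if the tail entry
 * has not yet been written by hardware (evt == 0). The rmb() orders
 * the validity check before the entry contents are used.
 */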
1273 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1274 {
1275 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1276
1277 if (!eqe->evt)
1278 return NULL;
1279
1280 rmb();
1281 eqe->evt = le32_to_cpu(eqe->evt);
1282 queue_tail_inc(&eq_obj->q);
1283 return eqe;
1284 }
1285
1286 static int event_handle(struct be_adapter *adapter,
1287 struct be_eq_obj *eq_obj)
1288 {
1289 struct be_eq_entry *eqe;
1290 u16 num = 0;
1291
1292 while ((eqe = event_get(eq_obj)) != NULL) {
1293 eqe->evt = 0;
1294 num++;
1295 }
1296
1297 /* Deal with any spurious interrupts that come
1298 * without events
1299 */
1300 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1301 if (num)
1302 napi_schedule(&eq_obj->napi);
1303
1304 return num;
1305 }
1306
1307 /* Just read and notify events without processing them.
1308 * Used at the time of destroying event queues */
1309 static void be_eq_clean(struct be_adapter *adapter,
1310 struct be_eq_obj *eq_obj)
1311 {
1312 struct be_eq_entry *eqe;
1313 u16 num = 0;
1314
1315 while ((eqe = event_get(eq_obj)) != NULL) {
1316 eqe->evt = 0;
1317 num++;
1318 }
1319
1320 if (num)
1321 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1322 }
1323
1324 static void be_rx_q_clean(struct be_adapter *adapter)
1325 {
1326 struct be_rx_page_info *page_info;
1327 struct be_queue_info *rxq = &adapter->rx_obj.q;
1328 struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
1329 struct be_eth_rx_compl *rxcp;
1330 u16 tail;
1331
1332 /* First clean up pending rx completions */
1333 while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
1334 be_rx_compl_discard(adapter, rxcp);
1335 be_rx_compl_reset(rxcp);
1336 be_cq_notify(adapter, rx_cq->id, true, 1);
1337 }
1338
1339 /* Then free posted rx buffers that were not used */
1340 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1341 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1342 page_info = get_rx_page_info(adapter, tail);
1343 put_page(page_info->page);
1344 memset(page_info, 0, sizeof(*page_info));
1345 }
1346 BUG_ON(atomic_read(&rxq->used));
1347 }
1348
1349 static void be_tx_compl_clean(struct be_adapter *adapter)
1350 {
1351 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1352 struct be_queue_info *txq = &adapter->tx_obj.q;
1353 struct be_eth_tx_compl *txcp;
1354 u16 end_idx, cmpl = 0, timeo = 0;
1355 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1356 struct sk_buff *sent_skb;
1357 bool dummy_wrb;
1358
1359 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1360 do {
1361 while ((txcp = be_tx_compl_get(tx_cq))) {
1362 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1363 wrb_index, txcp);
1364 be_tx_compl_process(adapter, end_idx);
1365 cmpl++;
1366 }
1367 if (cmpl) {
1368 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1369 cmpl = 0;
1370 }
1371
1372 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1373 break;
1374
1375 mdelay(1);
1376 } while (true);
1377
1378 if (atomic_read(&txq->used))
1379 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1380 atomic_read(&txq->used));
1381
1382 /* free posted tx for which compls will never arrive */
1383 while (atomic_read(&txq->used)) {
1384 sent_skb = sent_skbs[txq->tail];
1385 end_idx = txq->tail;
1386 index_adv(&end_idx,
1387 wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
1388 be_tx_compl_process(adapter, end_idx);
1389 }
1390 }
1391
1392 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1393 {
1394 struct be_queue_info *q;
1395
1396 q = &adapter->mcc_obj.q;
1397 if (q->created)
1398 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1399 be_queue_free(adapter, q);
1400
1401 q = &adapter->mcc_obj.cq;
1402 if (q->created)
1403 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1404 be_queue_free(adapter, q);
1405 }
1406
1407 /* Must be called only after TX qs are created as MCC shares TX EQ */
1408 static int be_mcc_queues_create(struct be_adapter *adapter)
1409 {
1410 struct be_queue_info *q, *cq;
1411
1412 /* Alloc MCC compl queue */
1413 cq = &adapter->mcc_obj.cq;
1414 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1415 sizeof(struct be_mcc_compl)))
1416 goto err;
1417
1418 /* Ask BE to create MCC compl queue; share TX's eq */
1419 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1420 goto mcc_cq_free;
1421
1422 /* Alloc MCC queue */
1423 q = &adapter->mcc_obj.q;
1424 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1425 goto mcc_cq_destroy;
1426
1427 /* Ask BE to create MCC queue */
1428 if (be_cmd_mccq_create(adapter, q, cq))
1429 goto mcc_q_free;
1430
1431 return 0;
1432
1433 mcc_q_free:
1434 be_queue_free(adapter, q);
1435 mcc_cq_destroy:
1436 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1437 mcc_cq_free:
1438 be_queue_free(adapter, cq);
1439 err:
1440 return -1;
1441 }
1442
1443 static void be_tx_queues_destroy(struct be_adapter *adapter)
1444 {
1445 struct be_queue_info *q;
1446
1447 q = &adapter->tx_obj.q;
1448 if (q->created)
1449 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1450 be_queue_free(adapter, q);
1451
1452 q = &adapter->tx_obj.cq;
1453 if (q->created)
1454 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1455 be_queue_free(adapter, q);
1456
1457 /* Clear any residual events */
1458 be_eq_clean(adapter, &adapter->tx_eq);
1459
1460 q = &adapter->tx_eq.q;
1461 if (q->created)
1462 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1463 be_queue_free(adapter, q);
1464 }
1465
1466 static int be_tx_queues_create(struct be_adapter *adapter)
1467 {
1468 struct be_queue_info *eq, *q, *cq;
1469
1470 adapter->tx_eq.max_eqd = 0;
1471 adapter->tx_eq.min_eqd = 0;
1472 adapter->tx_eq.cur_eqd = 96;
1473 adapter->tx_eq.enable_aic = false;
1474 /* Alloc Tx Event queue */
1475 eq = &adapter->tx_eq.q;
1476 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1477 return -1;
1478
1479 /* Ask BE to create Tx Event queue */
1480 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1481 goto tx_eq_free;
1482 adapter->base_eq_id = adapter->tx_eq.q.id;
1483
1484 /* Alloc TX eth compl queue */
1485 cq = &adapter->tx_obj.cq;
1486 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1487 sizeof(struct be_eth_tx_compl)))
1488 goto tx_eq_destroy;
1489
1490 /* Ask BE to create Tx eth compl queue */
1491 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1492 goto tx_cq_free;
1493
1494 /* Alloc TX eth queue */
1495 q = &adapter->tx_obj.q;
1496 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1497 goto tx_cq_destroy;
1498
1499 /* Ask BE to create Tx eth queue */
1500 if (be_cmd_txq_create(adapter, q, cq))
1501 goto tx_q_free;
1502 return 0;
1503
1504 tx_q_free:
1505 be_queue_free(adapter, q);
1506 tx_cq_destroy:
1507 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1508 tx_cq_free:
1509 be_queue_free(adapter, cq);
1510 tx_eq_destroy:
1511 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1512 tx_eq_free:
1513 be_queue_free(adapter, eq);
1514 return -1;
1515 }
1516
1517 static void be_rx_queues_destroy(struct be_adapter *adapter)
1518 {
1519 struct be_queue_info *q;
1520
1521 q = &adapter->rx_obj.q;
1522 if (q->created) {
1523 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1524
1525 /* After the rxq is invalidated, wait for a grace time
1526 * of 1ms for all dma to end and the flush compl to arrive
1527 */
1528 mdelay(1);
1529 be_rx_q_clean(adapter);
1530 }
1531 be_queue_free(adapter, q);
1532
1533 q = &adapter->rx_obj.cq;
1534 if (q->created)
1535 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1536 be_queue_free(adapter, q);
1537
1538 /* Clear any residual events */
1539 be_eq_clean(adapter, &adapter->rx_eq);
1540
1541 q = &adapter->rx_eq.q;
1542 if (q->created)
1543 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1544 be_queue_free(adapter, q);
1545 }
1546
1547 static int be_rx_queues_create(struct be_adapter *adapter)
1548 {
1549 struct be_queue_info *eq, *q, *cq;
1550 int rc;
1551
1552 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1553 adapter->rx_eq.max_eqd = BE_MAX_EQD;
1554 adapter->rx_eq.min_eqd = 0;
1555 adapter->rx_eq.cur_eqd = 0;
1556 adapter->rx_eq.enable_aic = true;
1557
1558 /* Alloc Rx Event queue */
1559 eq = &adapter->rx_eq.q;
1560 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1561 sizeof(struct be_eq_entry));
1562 if (rc)
1563 return rc;
1564
1565 /* Ask BE to create Rx Event queue */
1566 rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
1567 if (rc)
1568 goto rx_eq_free;
1569
1570 /* Alloc RX eth compl queue */
1571 cq = &adapter->rx_obj.cq;
1572 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1573 sizeof(struct be_eth_rx_compl));
1574 if (rc)
1575 goto rx_eq_destroy;
1576
1577 /* Ask BE to create Rx eth compl queue */
1578 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1579 if (rc)
1580 goto rx_cq_free;
1581
1582 /* Alloc RX eth queue */
1583 q = &adapter->rx_obj.q;
1584 rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
1585 if (rc)
1586 goto rx_cq_destroy;
1587
1588 /* Ask BE to create Rx eth queue */
1589 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1590 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
1591 if (rc)
1592 goto rx_q_free;
1593
1594 return 0;
1595 rx_q_free:
1596 be_queue_free(adapter, q);
1597 rx_cq_destroy:
1598 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1599 rx_cq_free:
1600 be_queue_free(adapter, cq);
1601 rx_eq_destroy:
1602 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1603 rx_eq_free:
1604 be_queue_free(adapter, eq);
1605 return rc;
1606 }
1607
1608 /* There are 8 evt ids per func. Returns the evt id's bit number */
1609 static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1610 {
1611 return eq_id - adapter->base_eq_id;
1612 }
1613
1614 static irqreturn_t be_intx(int irq, void *dev)
1615 {
1616 struct be_adapter *adapter = dev;
1617 int isr;
1618
1619 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1620 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1621 if (!isr)
1622 return IRQ_NONE;
1623
1624 event_handle(adapter, &adapter->tx_eq);
1625 event_handle(adapter, &adapter->rx_eq);
1626
1627 return IRQ_HANDLED;
1628 }
1629
1630 static irqreturn_t be_msix_rx(int irq, void *dev)
1631 {
1632 struct be_adapter *adapter = dev;
1633
1634 event_handle(adapter, &adapter->rx_eq);
1635
1636 return IRQ_HANDLED;
1637 }
1638
1639 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1640 {
1641 struct be_adapter *adapter = dev;
1642
1643 event_handle(adapter, &adapter->tx_eq);
1644
1645 return IRQ_HANDLED;
1646 }
1647
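/* GRO is attempted only for error-free TCP frames; everything else
 * takes the regular rx completion path.
 */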
1648 static inline bool do_gro(struct be_adapter *adapter,
1649 struct be_eth_rx_compl *rxcp)
1650 {
1651 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1652 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1653
1654 if (err)
1655 drvr_stats(adapter)->be_rxcp_err++;
1656
1657 return (tcp_frame && !err) ? true : false;
1658 }
1659
1660 int be_poll_rx(struct napi_struct *napi, int budget)
1661 {
1662 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1663 struct be_adapter *adapter =
1664 container_of(rx_eq, struct be_adapter, rx_eq);
1665 struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
1666 struct be_eth_rx_compl *rxcp;
1667 u32 work_done;
1668
1669 adapter->stats.drvr_stats.be_rx_polls++;
1670 for (work_done = 0; work_done < budget; work_done++) {
1671 rxcp = be_rx_compl_get(adapter);
1672 if (!rxcp)
1673 break;
1674
1675 if (do_gro(adapter, rxcp))
1676 be_rx_compl_process_gro(adapter, rxcp);
1677 else
1678 be_rx_compl_process(adapter, rxcp);
1679
1680 be_rx_compl_reset(rxcp);
1681 }
1682
1683 /* Refill the queue */
1684 if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
1685 be_post_rx_frags(adapter);
1686
1687 /* All consumed */
1688 if (work_done < budget) {
1689 napi_complete(napi);
1690 be_cq_notify(adapter, rx_cq->id, true, work_done);
1691 } else {
1692 /* More to be consumed; continue with interrupts disabled */
1693 be_cq_notify(adapter, rx_cq->id, false, work_done);
1694 }
1695 return work_done;
1696 }
1697
1698 /* As TX and MCC share the same EQ check for both TX and MCC completions.
1699 * For TX/MCC we don't honour budget; consume everything
1700 */
1701 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1702 {
1703 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1704 struct be_adapter *adapter =
1705 container_of(tx_eq, struct be_adapter, tx_eq);
1706 struct be_queue_info *txq = &adapter->tx_obj.q;
1707 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1708 struct be_eth_tx_compl *txcp;
1709 int tx_compl = 0, mcc_compl, status = 0;
1710 u16 end_idx;
1711
1712 while ((txcp = be_tx_compl_get(tx_cq))) {
1713 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1714 wrb_index, txcp);
1715 be_tx_compl_process(adapter, end_idx);
1716 tx_compl++;
1717 }
1718
1719 mcc_compl = be_process_mcc(adapter, &status);
1720
1721 napi_complete(napi);
1722
1723 if (mcc_compl) {
1724 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1725 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1726 }
1727
1728 if (tx_compl) {
1729 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1730
1731 /* As Tx wrbs have been freed up, wake up netdev queue if
1732 * it was stopped due to lack of tx wrbs.
1733 */
1734 if (netif_queue_stopped(adapter->netdev) &&
1735 atomic_read(&txq->used) < txq->len / 2) {
1736 netif_wake_queue(adapter->netdev);
1737 }
1738
1739 drvr_stats(adapter)->be_tx_events++;
1740 drvr_stats(adapter)->be_tx_compl += tx_compl;
1741 }
1742
1743 return 1;
1744 }
1745
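/* An all-zeroes ONLINE register signals an unrecoverable error (UE)
 * in the adapter; ue_detected is latched so be_worker() dumps the
 * registers only once.
 */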
1746 static inline bool be_detect_ue(struct be_adapter *adapter)
1747 {
1748 u32 online0 = 0, online1 = 0;
1749
1750 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
1751
1752 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
1753
1754 if (!online0 || !online1) {
1755 adapter->ue_detected = true;
1756 dev_err(&adapter->pdev->dev,
1757 "UE Detected!! online0=%d online1=%d\n",
1758 online0, online1);
1759 return true;
1760 }
1761
1762 return false;
1763 }
1764
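/* Dump the source of a UE for debugging: read the UE status CSRs from
 * PCI config space, clear the bits masked off by their mask registers,
 * and log one line per block that still has its status bit set.
 */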
1765 void be_dump_ue(struct be_adapter *adapter)
1766 {
1767 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1768 u32 i;
1769
1770 pci_read_config_dword(adapter->pdev,
1771 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1772 pci_read_config_dword(adapter->pdev,
1773 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1774 pci_read_config_dword(adapter->pdev,
1775 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1776 pci_read_config_dword(adapter->pdev,
1777 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1778
1779 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1780 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1781
1782 if (ue_status_lo) {
1783 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1784 if (ue_status_lo & 1)
1785 dev_err(&adapter->pdev->dev,
1786 "UE: %s bit set\n", ue_status_low_desc[i]);
1787 }
1788 }
1789 if (ue_status_hi) {
1790 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1791 if (ue_status_hi & 1)
1792 dev_err(&adapter->pdev->dev,
1793 "UE: %s bit set\n", ue_status_hi_desc[i]);
1794 }
1795 }
1796
1797 }
1798
1799 static void be_worker(struct work_struct *work)
1800 {
1801 struct be_adapter *adapter =
1802 container_of(work, struct be_adapter, work.work);
1803
1804 be_cmd_get_stats(adapter, &adapter->stats.cmd);
1805
1806 /* Set EQ delay */
1807 be_rx_eqd_update(adapter);
1808
1809 be_tx_rate_update(adapter);
1810 be_rx_rate_update(adapter);
1811
1812 if (adapter->rx_post_starved) {
1813 adapter->rx_post_starved = false;
1814 be_post_rx_frags(adapter);
1815 }
1816 if (!adapter->ue_detected) {
1817 if (be_detect_ue(adapter))
1818 be_dump_ue(adapter);
1819 }
1820
1821 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1822 }
1823
1824 static void be_msix_disable(struct be_adapter *adapter)
1825 {
1826 if (adapter->msix_enabled) {
1827 pci_disable_msix(adapter->pdev);
1828 adapter->msix_enabled = false;
1829 }
1830 }
1831
1832 static void be_msix_enable(struct be_adapter *adapter)
1833 {
1834 int i, status;
1835
1836 for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
1837 adapter->msix_entries[i].entry = i;
1838
1839 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1840 BE_NUM_MSIX_VECTORS);
1841 if (status == 0)
1842 adapter->msix_enabled = true;
1843 }
1844
1845 static void be_sriov_enable(struct be_adapter *adapter)
1846 {
1847 be_check_sriov_fn_type(adapter);
1848 #ifdef CONFIG_PCI_IOV
1849 if (be_physfn(adapter) && num_vfs) {
1850 int status;
1851
1852 status = pci_enable_sriov(adapter->pdev, num_vfs);
1853 adapter->sriov_enabled = status ? false : true;
1854 }
1855 #endif
1856 }
1857
1858 static void be_sriov_disable(struct be_adapter *adapter)
1859 {
1860 #ifdef CONFIG_PCI_IOV
1861 if (adapter->sriov_enabled) {
1862 pci_disable_sriov(adapter->pdev);
1863 adapter->sriov_enabled = false;
1864 }
1865 #endif
1866 }
1867
1868 static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1869 {
1870 return adapter->msix_entries[
1871 be_evt_bit_get(adapter, eq_id)].vector;
1872 }
1873
1874 static int be_request_irq(struct be_adapter *adapter,
1875 struct be_eq_obj *eq_obj,
1876 void *handler, char *desc)
1877 {
1878 struct net_device *netdev = adapter->netdev;
1879 int vec;
1880
1881 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1882 vec = be_msix_vec_get(adapter, eq_obj->q.id);
1883 return request_irq(vec, handler, 0, eq_obj->desc, adapter);
1884 }
1885
1886 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
1887 {
1888 int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1889 free_irq(vec, adapter);
1890 }
1891
1892 static int be_msix_register(struct be_adapter *adapter)
1893 {
1894 int status;
1895
1896 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
1897 if (status)
1898 goto err;
1899
1900 status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
1901 if (status)
1902 goto free_tx_irq;
1903
1904 return 0;
1905
1906 free_tx_irq:
1907 be_free_irq(adapter, &adapter->tx_eq);
1908 err:
1909 dev_warn(&adapter->pdev->dev,
1910 "MSIX Request IRQ failed - err %d\n", status);
1911 pci_disable_msix(adapter->pdev);
1912 adapter->msix_enabled = false;
1913 return status;
1914 }
1915
1916 static int be_irq_register(struct be_adapter *adapter)
1917 {
1918 struct net_device *netdev = adapter->netdev;
1919 int status;
1920
1921 if (adapter->msix_enabled) {
1922 status = be_msix_register(adapter);
1923 if (status == 0)
1924 goto done;
1925 /* INTx is not supported for VF */
1926 if (!be_physfn(adapter))
1927 return status;
1928 }
1929
1930 /* INTx */
1931 netdev->irq = adapter->pdev->irq;
1932 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
1933 adapter);
1934 if (status) {
1935 dev_err(&adapter->pdev->dev,
1936 "INTx request IRQ failed - err %d\n", status);
1937 return status;
1938 }
1939 done:
1940 adapter->isr_registered = true;
1941 return 0;
1942 }
1943
1944 static void be_irq_unregister(struct be_adapter *adapter)
1945 {
1946 struct net_device *netdev = adapter->netdev;
1947
1948 if (!adapter->isr_registered)
1949 return;
1950
1951 /* INTx */
1952 if (!adapter->msix_enabled) {
1953 free_irq(netdev->irq, adapter);
1954 goto done;
1955 }
1956
1957 /* MSIx */
1958 be_free_irq(adapter, &adapter->tx_eq);
1959 be_free_irq(adapter, &adapter->rx_eq);
1960 done:
1961 adapter->isr_registered = false;
1962 }
1963
1964 static int be_close(struct net_device *netdev)
1965 {
1966 struct be_adapter *adapter = netdev_priv(netdev);
1967 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1968 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1969 int vec;
1970
1971 cancel_delayed_work_sync(&adapter->work);
1972
1973 be_async_mcc_disable(adapter);
1974
1975 netif_stop_queue(netdev);
1976 netif_carrier_off(netdev);
1977 adapter->link_up = false;
1978
1979 be_intr_set(adapter, false);
1980
1981 if (adapter->msix_enabled) {
1982 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1983 synchronize_irq(vec);
1984 vec = be_msix_vec_get(adapter, rx_eq->q.id);
1985 synchronize_irq(vec);
1986 } else {
1987 synchronize_irq(netdev->irq);
1988 }
1989 be_irq_unregister(adapter);
1990
1991 napi_disable(&rx_eq->napi);
1992 napi_disable(&tx_eq->napi);
1993
1994 /* Wait for all pending tx completions to arrive so that
1995 * all tx skbs are freed.
1996 */
1997 be_tx_compl_clean(adapter);
1998
1999 return 0;
2000 }
2001
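/* Bring-up order matters here: rx buffers are posted and NAPI enabled
 * before interrupts are unmasked, and the event/completion queues (created
 * unarmed) are armed only once handlers are in place.
 */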
2002 static int be_open(struct net_device *netdev)
2003 {
2004 struct be_adapter *adapter = netdev_priv(netdev);
2005 struct be_eq_obj *rx_eq = &adapter->rx_eq;
2006 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2007 bool link_up;
2008 int status;
2009 u8 mac_speed;
2010 u16 link_speed;
2011
2012 /* First time posting */
2013 be_post_rx_frags(adapter);
2014
2015 napi_enable(&rx_eq->napi);
2016 napi_enable(&tx_eq->napi);
2017
2018 be_irq_register(adapter);
2019
2020 be_intr_set(adapter, true);
2021
2022 /* The evt queues are created in unarmed state; arm them */
2023 be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
2024 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2025
2026 /* Rx compl queue may be in unarmed state; rearm it */
2027 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
2028
2029 /* Now that interrupts are on we can process async mcc */
2030 be_async_mcc_enable(adapter);
2031
2032 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2033
2034 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2035 &link_speed);
2036 if (status)
2037 goto err;
2038 be_link_status_update(adapter, link_up);
2039
2040 if (be_physfn(adapter)) {
2041 status = be_vid_config(adapter, false, 0);
2042 if (status)
2043 goto err;
2044
2045 status = be_cmd_set_flow_control(adapter,
2046 adapter->tx_fc, adapter->rx_fc);
2047 if (status)
2048 goto err;
2049 }
2050
2051 return 0;
2052 err:
2053 be_close(adapter->netdev);
2054 return -EIO;
2055 }
2056
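/* Program magic-packet wake-up in firmware. Enabling uses the netdev's
 * current MAC; disabling passes an all-zero MAC, which the firmware
 * apparently treats as "no wake-up filter".
 */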
2057 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2058 {
2059 struct be_dma_mem cmd;
2060 int status = 0;
2061 u8 mac[ETH_ALEN];
2062
2063 memset(mac, 0, ETH_ALEN);
2064
2065 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2066 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2067 if (cmd.va == NULL)
2068 return -ENOMEM;
2069 memset(cmd.va, 0, cmd.size);
2070
2071 if (enable) {
2072 status = pci_write_config_dword(adapter->pdev,
2073 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2074 if (status) {
2075 dev_err(&adapter->pdev->dev,
2076 "Could not enable Wake-on-lan\n");
2077 pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2078 cmd.dma);
2079 return status;
2080 }
2081 status = be_cmd_enable_magic_wol(adapter,
2082 adapter->netdev->dev_addr, &cmd);
2083 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2084 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2085 } else {
2086 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2087 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2088 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2089 }
2090
2091 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2092 return status;
2093 }
2094
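/* Create the interface handle(s) and all queues. The PF additionally
 * creates one interface per VF; mac is passed uninitialized there,
 * presumably acceptable because pmac_invalid tells the firmware to
 * ignore it. Failures unwind through the labels at the bottom.
 */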
2095 static int be_setup(struct be_adapter *adapter)
2096 {
2097 struct net_device *netdev = adapter->netdev;
2098 u32 cap_flags, en_flags, vf = 0;
2099 int status;
2100 u8 mac[ETH_ALEN];
2101
2102 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2103
2104 if (be_physfn(adapter)) {
2105 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2106 BE_IF_FLAGS_PROMISCUOUS |
2107 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2108 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2109 }
2110
2111 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2112 netdev->dev_addr, false /* pmac_invalid */,
2113 &adapter->if_handle, &adapter->pmac_id, 0);
2114 if (status != 0)
2115 goto do_none;
2116
2117 if (be_physfn(adapter)) {
2118 while (vf < num_vfs) {
2119 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2120 | BE_IF_FLAGS_BROADCAST;
2121 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2122 mac, true,
2123 &adapter->vf_cfg[vf].vf_if_handle,
2124 NULL, vf+1);
2125 if (status) {
2126 dev_err(&adapter->pdev->dev,
2127 "Interface Create failed for VF %d\n", vf);
2128 goto if_destroy;
2129 }
2130 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2131 vf++;
2132 }
2133 } else {
2134 status = be_cmd_mac_addr_query(adapter, mac,
2135 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2136 if (!status) {
2137 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2138 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2139 }
2140 }
2141
2142 status = be_tx_queues_create(adapter);
2143 if (status != 0)
2144 goto if_destroy;
2145
2146 status = be_rx_queues_create(adapter);
2147 if (status != 0)
2148 goto tx_qs_destroy;
2149
2150 status = be_mcc_queues_create(adapter);
2151 if (status != 0)
2152 goto rx_qs_destroy;
2153
2154 adapter->link_speed = -1;
2155
2156 return 0;
2157
2158 rx_qs_destroy:
2159 be_rx_queues_destroy(adapter);
2160 tx_qs_destroy:
2161 be_tx_queues_destroy(adapter);
2162 if_destroy:
2163 for (vf = 0; vf < num_vfs; vf++)
2164 if (adapter->vf_cfg[vf].vf_if_handle)
2165 be_cmd_if_destroy(adapter,
2166 adapter->vf_cfg[vf].vf_if_handle);
2167 be_cmd_if_destroy(adapter, adapter->if_handle);
2168 do_none:
2169 return status;
2170 }
2171
2172 static int be_clear(struct be_adapter *adapter)
2173 {
2174 be_mcc_queues_destroy(adapter);
2175 be_rx_queues_destroy(adapter);
2176 be_tx_queues_destroy(adapter);
2177
2178 be_cmd_if_destroy(adapter, adapter->if_handle);
2179
2180 /* tell fw we're done with firing cmds */
2181 be_cmd_fw_clean(adapter);
2182 return 0;
2183 }
2184
2186 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2187 char flash_cookie[2][16] = {"*** SE FLAS",
2188 "H DIRECTORY *** "};
2189
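/* Decide whether the redboot (boot code) section actually needs flashing:
 * compare the CRC in the last 4 bytes of the new image against the CRC the
 * firmware reads back from flash, and skip the write if they match.
 */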
2190 static bool be_flash_redboot(struct be_adapter *adapter,
2191 const u8 *p, u32 img_start, int image_size,
2192 int hdr_size)
2193 {
2194 u32 crc_offset;
2195 u8 flashed_crc[4];
2196 int status;
2197
2198 crc_offset = hdr_size + img_start + image_size - 4;
2199
2200 p += crc_offset;
2201
2202 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2203 (image_size - 4));
2204 if (status) {
2205 dev_err(&adapter->pdev->dev,
2206 "could not get crc from flash, not flashing redboot\n");
2207 return false;
2208 }
2209
2210 /* update redboot only if crc does not match */
2211 if (!memcmp(flashed_crc, p, 4))
2212 return false;
2213 else
2214 return true;
2215 }
2216
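/* Walk the per-generation component table and write each image section to
 * flash in 32KB chunks. Intermediate chunks are sent with
 * FLASHROM_OPER_SAVE; the final chunk uses FLASHROM_OPER_FLASH, which
 * presumably commits the accumulated data to the flash part.
 */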
2217 static int be_flash_data(struct be_adapter *adapter,
2218 const struct firmware *fw,
2219 struct be_dma_mem *flash_cmd, int num_of_images)
2221 {
2222 int status = 0, i, filehdr_size = 0;
2223 u32 total_bytes = 0, flash_op;
2224 int num_bytes;
2225 const u8 *p = fw->data;
2226 struct be_cmd_write_flashrom *req = flash_cmd->va;
2227 struct flash_comp *pflashcomp;
2228 int num_comp;
2229
2230 struct flash_comp gen3_flash_types[9] = {
2231 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2232 FLASH_IMAGE_MAX_SIZE_g3},
2233 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2234 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2235 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2236 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2237 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2238 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2239 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2240 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2241 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2242 FLASH_IMAGE_MAX_SIZE_g3},
2243 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2244 FLASH_IMAGE_MAX_SIZE_g3},
2245 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2246 FLASH_IMAGE_MAX_SIZE_g3},
2247 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2248 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2249 };
2250 struct flash_comp gen2_flash_types[8] = {
2251 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2252 FLASH_IMAGE_MAX_SIZE_g2},
2253 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2254 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2255 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2256 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2257 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2258 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2259 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2260 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2261 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2262 FLASH_IMAGE_MAX_SIZE_g2},
2263 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2264 FLASH_IMAGE_MAX_SIZE_g2},
2265 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2266 FLASH_IMAGE_MAX_SIZE_g2}
2267 };
2268
2269 if (adapter->generation == BE_GEN3) {
2270 pflashcomp = gen3_flash_types;
2271 filehdr_size = sizeof(struct flash_file_hdr_g3);
2272 num_comp = 9;
2273 } else {
2274 pflashcomp = gen2_flash_types;
2275 filehdr_size = sizeof(struct flash_file_hdr_g2);
2276 num_comp = 8;
2277 }
2278 for (i = 0; i < num_comp; i++) {
2279 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2280 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2281 continue;
2282 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2283 (!be_flash_redboot(adapter, fw->data,
2284 pflashcomp[i].offset, pflashcomp[i].size,
2285 filehdr_size)))
2286 continue;
2287 p = fw->data;
2288 p += filehdr_size + pflashcomp[i].offset
2289 + (num_of_images * sizeof(struct image_hdr));
2290 if (p + pflashcomp[i].size > fw->data + fw->size)
2291 return -1;
2292 total_bytes = pflashcomp[i].size;
2293 while (total_bytes) {
2294 if (total_bytes > 32*1024)
2295 num_bytes = 32*1024;
2296 else
2297 num_bytes = total_bytes;
2298 total_bytes -= num_bytes;
2299
2300 if (!total_bytes)
2301 flash_op = FLASHROM_OPER_FLASH;
2302 else
2303 flash_op = FLASHROM_OPER_SAVE;
2304 memcpy(req->params.data_buf, p, num_bytes);
2305 p += num_bytes;
2306 status = be_cmd_write_flashrom(adapter, flash_cmd,
2307 pflashcomp[i].optype, flash_op, num_bytes);
2308 if (status) {
2309 dev_err(&adapter->pdev->dev,
2310 "cmd to write to flash rom failed.\n");
2311 return -1;
2312 }
2313 yield();
2314 }
2315 }
2316 return 0;
2317 }
2318
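/* Infer the UFI (firmware image file) generation from the first character
 * of the build string in its header: '3' -> BE_GEN3, '2' -> BE_GEN2,
 * anything else is treated as unknown.
 */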
2319 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2320 {
2321 if (fhdr == NULL)
2322 return 0;
2323 if (fhdr->build[0] == '3')
2324 return BE_GEN3;
2325 else if (fhdr->build[0] == '2')
2326 return BE_GEN2;
2327 else
2328 return 0;
2329 }
2330
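/* Fetch a UFI file via request_firmware() and flash it. This entry point is
 * presumably reached through the ethtool flash_device hook, e.g. the
 * illustrative invocation:
 *
 *	ethtool -f eth0 <ufi_file>
 *
 * The file's generation must match the adapter's; gen3 files carry several
 * images and only the one with imageid 1 is flashed.
 */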
2331 int be_load_fw(struct be_adapter *adapter, u8 *func)
2332 {
2333 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2334 const struct firmware *fw;
2335 struct flash_file_hdr_g2 *fhdr;
2336 struct flash_file_hdr_g3 *fhdr3;
2337 struct image_hdr *img_hdr_ptr = NULL;
2338 struct be_dma_mem flash_cmd;
2339 int status, i = 0, num_imgs = 0;
2340 const u8 *p;
2341
2342 strcpy(fw_file, func);
2343
2344 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2345 if (status)
2346 goto fw_exit;
2347
2348 p = fw->data;
2349 fhdr = (struct flash_file_hdr_g2 *) p;
2350 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2351
2352 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2353 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2354 &flash_cmd.dma);
2355 if (!flash_cmd.va) {
2356 status = -ENOMEM;
2357 dev_err(&adapter->pdev->dev,
2358 "Memory allocation failure while flashing\n");
2359 goto fw_exit;
2360 }
2361
2362 if ((adapter->generation == BE_GEN3) &&
2363 (get_ufigen_type(fhdr) == BE_GEN3)) {
2364 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2365 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2366 for (i = 0; i < num_imgs; i++) {
2367 img_hdr_ptr = (struct image_hdr *) (fw->data +
2368 (sizeof(struct flash_file_hdr_g3) +
2369 i * sizeof(struct image_hdr)));
2370 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2371 status = be_flash_data(adapter, fw, &flash_cmd,
2372 num_imgs);
2373 }
2374 } else if ((adapter->generation == BE_GEN2) &&
2375 (get_ufigen_type(fhdr) == BE_GEN2)) {
2376 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2377 } else {
2378 dev_err(&adapter->pdev->dev,
2379 "UFI and Interface are not compatible for flashing\n");
2380 status = -1;
2381 }
2382
2383 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2384 flash_cmd.dma);
2385 if (status) {
2386 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2387 goto fw_exit;
2388 }
2389
2390 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2391
2392 fw_exit:
2393 release_firmware(fw);
2394 return status;
2395 }
2396
2397 static struct net_device_ops be_netdev_ops = {
2398 .ndo_open = be_open,
2399 .ndo_stop = be_close,
2400 .ndo_start_xmit = be_xmit,
2401 .ndo_get_stats = be_get_stats,
2402 .ndo_set_rx_mode = be_set_multicast_list,
2403 .ndo_set_mac_address = be_mac_addr_set,
2404 .ndo_change_mtu = be_change_mtu,
2405 .ndo_validate_addr = eth_validate_addr,
2406 .ndo_vlan_rx_register = be_vlan_register,
2407 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2408 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2409 .ndo_set_vf_mac = be_set_vf_mac,
2410 .ndo_set_vf_vlan = be_set_vf_vlan,
2411 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2412 .ndo_get_vf_config = be_get_vf_config
2413 };
2414
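/* Set up netdev feature flags, default flow-control settings, ops and NAPI
 * contexts. The carrier is left off until the first link-status update
 * arrives from firmware.
 */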
2415 static void be_netdev_init(struct net_device *netdev)
2416 {
2417 struct be_adapter *adapter = netdev_priv(netdev);
2418
2419 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2420 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
2421 NETIF_F_GRO | NETIF_F_TSO6;
2422
2423 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2424
2425 netdev->flags |= IFF_MULTICAST;
2426
2427 adapter->rx_csum = true;
2428
2429 /* Default settings for Rx and Tx flow control */
2430 adapter->rx_fc = true;
2431 adapter->tx_fc = true;
2432
2433 netif_set_gso_max_size(netdev, 65535);
2434
2435 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2436
2437 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2438
2439 netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
2440 BE_NAPI_WEIGHT);
2441 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2442 BE_NAPI_WEIGHT);
2443
2444 netif_carrier_off(netdev);
2445 netif_stop_queue(netdev);
2446 }
2447
2448 static void be_unmap_pci_bars(struct be_adapter *adapter)
2449 {
2450 if (adapter->csr)
2451 iounmap(adapter->csr);
2452 if (adapter->db)
2453 iounmap(adapter->db);
2454 if (adapter->pcicfg && be_physfn(adapter))
2455 iounmap(adapter->pcicfg);
2456 }
2457
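/* Map the BARs this function needs. The CSR BAR (2) exists only on the PF;
 * the doorbell and config BARs vary by generation and function type, and a
 * VF reaches its "pcicfg" space at a fixed offset inside the doorbell BAR
 * rather than through a BAR of its own.
 */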
2458 static int be_map_pci_bars(struct be_adapter *adapter)
2459 {
2460 u8 __iomem *addr;
2461 int pcicfg_reg, db_reg;
2462
2463 if (be_physfn(adapter)) {
2464 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2465 pci_resource_len(adapter->pdev, 2));
2466 if (addr == NULL)
2467 return -ENOMEM;
2468 adapter->csr = addr;
2469 }
2470
2471 if (adapter->generation == BE_GEN2) {
2472 pcicfg_reg = 1;
2473 db_reg = 4;
2474 } else {
2475 pcicfg_reg = 0;
2476 if (be_physfn(adapter))
2477 db_reg = 4;
2478 else
2479 db_reg = 0;
2480 }
2481 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2482 pci_resource_len(adapter->pdev, db_reg));
2483 if (addr == NULL)
2484 goto pci_map_err;
2485 adapter->db = addr;
2486
2487 if (be_physfn(adapter)) {
2488 addr = ioremap_nocache(
2489 pci_resource_start(adapter->pdev, pcicfg_reg),
2490 pci_resource_len(adapter->pdev, pcicfg_reg));
2491 if (addr == NULL)
2492 goto pci_map_err;
2493 adapter->pcicfg = addr;
2494 } else
2495 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2496
2497 return 0;
2498 pci_map_err:
2499 be_unmap_pci_bars(adapter);
2500 return -ENOMEM;
2501 }
2502
2504 static void be_ctrl_cleanup(struct be_adapter *adapter)
2505 {
2506 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2507
2508 be_unmap_pci_bars(adapter);
2509
2510 if (mem->va)
2511 pci_free_consistent(adapter->pdev, mem->size,
2512 mem->va, mem->dma);
2513
2514 mem = &adapter->mc_cmd_mem;
2515 if (mem->va)
2516 pci_free_consistent(adapter->pdev, mem->size,
2517 mem->va, mem->dma);
2518 }
2519
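/* Map BARs and set up the mailbox used to issue commands to firmware. The
 * mailbox is allocated 16 bytes oversize so that both its virtual and DMA
 * addresses can be rounded up to the 16-byte alignment the hardware
 * presumably requires.
 */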
2520 static int be_ctrl_init(struct be_adapter *adapter)
2521 {
2522 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2523 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2524 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2525 int status;
2526
2527 status = be_map_pci_bars(adapter);
2528 if (status)
2529 goto done;
2530
2531 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2532 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2533 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2534 if (!mbox_mem_alloc->va) {
2535 status = -ENOMEM;
2536 goto unmap_pci_bars;
2537 }
2538
2539 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2540 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2541 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2542 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2543
2544 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2545 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2546 &mc_cmd_mem->dma);
2547 if (mc_cmd_mem->va == NULL) {
2548 status = -ENOMEM;
2549 goto free_mbox;
2550 }
2551 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2552
2553 spin_lock_init(&adapter->mbox_lock);
2554 spin_lock_init(&adapter->mcc_lock);
2555 spin_lock_init(&adapter->mcc_cq_lock);
2556
2557 init_completion(&adapter->flash_compl);
2558 pci_save_state(adapter->pdev);
2559 return 0;
2560
2561 free_mbox:
2562 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2563 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2564
2565 unmap_pci_bars:
2566 be_unmap_pci_bars(adapter);
2567
2568 done:
2569 return status;
2570 }
2571
2572 static void be_stats_cleanup(struct be_adapter *adapter)
2573 {
2574 struct be_stats_obj *stats = &adapter->stats;
2575 struct be_dma_mem *cmd = &stats->cmd;
2576
2577 if (cmd->va)
2578 pci_free_consistent(adapter->pdev, cmd->size,
2579 cmd->va, cmd->dma);
2580 }
2581
2582 static int be_stats_init(struct be_adapter *adapter)
2583 {
2584 struct be_stats_obj *stats = &adapter->stats;
2585 struct be_dma_mem *cmd = &stats->cmd;
2586
2587 cmd->size = sizeof(struct be_cmd_req_get_stats);
2588 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2589 if (cmd->va == NULL)
2590 return -ENOMEM;
2591 memset(cmd->va, 0, cmd->size);
2592 return 0;
2593 }
2594
2595 static void __devexit be_remove(struct pci_dev *pdev)
2596 {
2597 struct be_adapter *adapter = pci_get_drvdata(pdev);
2598
2599 if (!adapter)
2600 return;
2601
2602 unregister_netdev(adapter->netdev);
2603
2604 be_clear(adapter);
2605
2606 be_stats_cleanup(adapter);
2607
2608 be_ctrl_cleanup(adapter);
2609
2610 be_sriov_disable(adapter);
2611
2612 be_msix_disable(adapter);
2613
2614 pci_set_drvdata(pdev, NULL);
2615 pci_release_regions(pdev);
2616 pci_disable_device(pdev);
2617
2618 free_netdev(adapter->netdev);
2619 }
2620
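/* Pull the initial configuration from firmware: version string, port number
 * and function mode, and (on the PF) the permanent MAC address. The 0x400
 * function_mode bit is undocumented here; when it is set, only a quarter of
 * the usual VLAN table appears to be available to this function.
 */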
2621 static int be_get_config(struct be_adapter *adapter)
2622 {
2623 int status;
2624 u8 mac[ETH_ALEN];
2625
2626 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2627 if (status)
2628 return status;
2629
2630 status = be_cmd_query_fw_cfg(adapter,
2631 &adapter->port_num, &adapter->function_mode);
2632 if (status)
2633 return status;
2634
2635 memset(mac, 0, ETH_ALEN);
2636
2637 if (be_physfn(adapter)) {
2638 status = be_cmd_mac_addr_query(adapter, mac,
2639 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2640
2641 if (status)
2642 return status;
2643
2644 if (!is_valid_ether_addr(mac))
2645 return -EADDRNOTAVAIL;
2646
2647 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2648 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2649 }
2650
2651 if (adapter->function_mode & 0x400)
2652 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2653 else
2654 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2655
2656 return 0;
2657 }
2658
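/* PCI probe: enable and map the device, derive the adapter generation from
 * the device id, bring firmware to a ready state, then create queues and
 * register the netdev. Each failure point unwinds through the labels below
 * in reverse order of setup.
 */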
2659 static int __devinit be_probe(struct pci_dev *pdev,
2660 const struct pci_device_id *pdev_id)
2661 {
2662 int status = 0;
2663 struct be_adapter *adapter;
2664 struct net_device *netdev;
2665
2667 status = pci_enable_device(pdev);
2668 if (status)
2669 goto do_none;
2670
2671 status = pci_request_regions(pdev, DRV_NAME);
2672 if (status)
2673 goto disable_dev;
2674 pci_set_master(pdev);
2675
2676 netdev = alloc_etherdev(sizeof(struct be_adapter));
2677 if (netdev == NULL) {
2678 status = -ENOMEM;
2679 goto rel_reg;
2680 }
2681 adapter = netdev_priv(netdev);
2682
2683 switch (pdev->device) {
2684 case BE_DEVICE_ID1:
2685 case OC_DEVICE_ID1:
2686 adapter->generation = BE_GEN2;
2687 break;
2688 case BE_DEVICE_ID2:
2689 case OC_DEVICE_ID2:
2690 adapter->generation = BE_GEN3;
2691 break;
2692 default:
2693 adapter->generation = 0;
2694 }
2695
2696 adapter->pdev = pdev;
2697 pci_set_drvdata(pdev, adapter);
2698 adapter->netdev = netdev;
2699 be_netdev_init(netdev);
2700 SET_NETDEV_DEV(netdev, &pdev->dev);
2701
2702 be_msix_enable(adapter);
2703
2704 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2705 if (!status) {
2706 netdev->features |= NETIF_F_HIGHDMA;
2707 } else {
2708 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2709 if (status) {
2710 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2711 goto free_netdev;
2712 }
2713 }
2714
2715 be_sriov_enable(adapter);
2716
2717 status = be_ctrl_init(adapter);
2718 if (status)
2719 goto free_netdev;
2720
2721 /* sync up with fw's ready state */
2722 if (be_physfn(adapter)) {
2723 status = be_cmd_POST(adapter);
2724 if (status)
2725 goto ctrl_clean;
2726 }
2727
2728 /* tell fw we're ready to fire cmds */
2729 status = be_cmd_fw_init(adapter);
2730 if (status)
2731 goto ctrl_clean;
2732
2733 if (be_physfn(adapter)) {
2734 status = be_cmd_reset_function(adapter);
2735 if (status)
2736 goto ctrl_clean;
2737 }
2738
2739 status = be_stats_init(adapter);
2740 if (status)
2741 goto ctrl_clean;
2742
2743 status = be_get_config(adapter);
2744 if (status)
2745 goto stats_clean;
2746
2747 INIT_DELAYED_WORK(&adapter->work, be_worker);
2748
2749 status = be_setup(adapter);
2750 if (status)
2751 goto stats_clean;
2752
2753 status = register_netdev(netdev);
2754 if (status != 0)
2755 goto unsetup;
2756
2757 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2758 return 0;
2759
2760 unsetup:
2761 be_clear(adapter);
2762 stats_clean:
2763 be_stats_cleanup(adapter);
2764 ctrl_clean:
2765 be_ctrl_cleanup(adapter);
2766 free_netdev:
2767 be_msix_disable(adapter);
2768 be_sriov_disable(adapter);
2769 free_netdev(adapter->netdev);
2770 pci_set_drvdata(pdev, NULL);
2771 rel_reg:
2772 pci_release_regions(pdev);
2773 disable_dev:
2774 pci_disable_device(pdev);
2775 do_none:
2776 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
2777 return status;
2778 }
2779
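/* On suspend, optionally arm wake-on-LAN, close the interface under rtnl
 * and tear down all queues so that be_resume() can rebuild them from
 * scratch.
 */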
2780 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2781 {
2782 struct be_adapter *adapter = pci_get_drvdata(pdev);
2783 struct net_device *netdev = adapter->netdev;
2784
2785 if (adapter->wol)
2786 be_setup_wol(adapter, true);
2787
2788 netif_device_detach(netdev);
2789 if (netif_running(netdev)) {
2790 rtnl_lock();
2791 be_close(netdev);
2792 rtnl_unlock();
2793 }
2794 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
2795 be_clear(adapter);
2796
2797 pci_save_state(pdev);
2798 pci_disable_device(pdev);
2799 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2800 return 0;
2801 }
2802
2803 static int be_resume(struct pci_dev *pdev)
2804 {
2805 int status = 0;
2806 struct be_adapter *adapter = pci_get_drvdata(pdev);
2807 struct net_device *netdev = adapter->netdev;
2808
2809 netif_device_detach(netdev);
2810
2811 status = pci_enable_device(pdev);
2812 if (status)
2813 return status;
2814
2815 pci_set_power_state(pdev, 0);
2816 pci_restore_state(pdev);
2817
2818 /* tell fw we're ready to fire cmds */
2819 status = be_cmd_fw_init(adapter);
2820 if (status)
2821 return status;
2822
2823 be_setup(adapter);
2824 if (netif_running(netdev)) {
2825 rtnl_lock();
2826 be_open(netdev);
2827 rtnl_unlock();
2828 }
2829 netif_device_attach(netdev);
2830
2831 if (adapter->wol)
2832 be_setup_wol(adapter, false);
2833 return 0;
2834 }
2835
2836 /*
2837 * An FLR will stop BE from DMAing any data.
2838 */
2839 static void be_shutdown(struct pci_dev *pdev)
2840 {
2841 struct be_adapter *adapter = pci_get_drvdata(pdev);
2842 struct net_device *netdev = adapter->netdev;
2843
2844 netif_device_detach(netdev);
2845
2846 be_cmd_reset_function(adapter);
2847
2848 if (adapter->wol)
2849 be_setup_wol(adapter, true);
2850
2851 pci_disable_device(pdev);
2852 }
2853
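/* EEH (PCI error) recovery: error_detected tears the device down and asks
 * for a slot reset, slot_reset re-enables the device and re-runs POST, and
 * resume rebuilds the control path and reopens the interface.
 */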
2854 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
2855 pci_channel_state_t state)
2856 {
2857 struct be_adapter *adapter = pci_get_drvdata(pdev);
2858 struct net_device *netdev = adapter->netdev;
2859
2860 dev_err(&adapter->pdev->dev, "EEH error detected\n");
2861
2862 adapter->eeh_err = true;
2863
2864 netif_device_detach(netdev);
2865
2866 if (netif_running(netdev)) {
2867 rtnl_lock();
2868 be_close(netdev);
2869 rtnl_unlock();
2870 }
2871 be_clear(adapter);
2872
2873 if (state == pci_channel_io_perm_failure)
2874 return PCI_ERS_RESULT_DISCONNECT;
2875
2876 pci_disable_device(pdev);
2877
2878 return PCI_ERS_RESULT_NEED_RESET;
2879 }
2880
2881 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
2882 {
2883 struct be_adapter *adapter = pci_get_drvdata(pdev);
2884 int status;
2885
2886 dev_info(&adapter->pdev->dev, "EEH reset\n");
2887 adapter->eeh_err = false;
2888
2889 status = pci_enable_device(pdev);
2890 if (status)
2891 return PCI_ERS_RESULT_DISCONNECT;
2892
2893 pci_set_master(pdev);
2894 pci_set_power_state(pdev, 0);
2895 pci_restore_state(pdev);
2896
2897 /* Check if card is ok and fw is ready */
2898 status = be_cmd_POST(adapter);
2899 if (status)
2900 return PCI_ERS_RESULT_DISCONNECT;
2901
2902 return PCI_ERS_RESULT_RECOVERED;
2903 }
2904
2905 static void be_eeh_resume(struct pci_dev *pdev)
2906 {
2907 int status = 0;
2908 struct be_adapter *adapter = pci_get_drvdata(pdev);
2909 struct net_device *netdev = adapter->netdev;
2910
2911 dev_info(&adapter->pdev->dev, "EEH resume\n");
2912
2913 pci_save_state(pdev);
2914
2915 /* tell fw we're ready to fire cmds */
2916 status = be_cmd_fw_init(adapter);
2917 if (status)
2918 goto err;
2919
2920 status = be_setup(adapter);
2921 if (status)
2922 goto err;
2923
2924 if (netif_running(netdev)) {
2925 status = be_open(netdev);
2926 if (status)
2927 goto err;
2928 }
2929 netif_device_attach(netdev);
2930 return;
2931 err:
2932 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
2933 }
2934
2935 static struct pci_error_handlers be_eeh_handlers = {
2936 .error_detected = be_eeh_err_detected,
2937 .slot_reset = be_eeh_reset,
2938 .resume = be_eeh_resume,
2939 };
2940
2941 static struct pci_driver be_driver = {
2942 .name = DRV_NAME,
2943 .id_table = be_dev_ids,
2944 .probe = be_probe,
2945 .remove = be_remove,
2946 .suspend = be_suspend,
2947 .resume = be_resume,
2948 .shutdown = be_shutdown,
2949 .err_handler = &be_eeh_handlers
2950 };
2951
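/* Validate module parameters before registering the PCI driver. As an
 * illustrative example (parameter values are assumptions, not defaults):
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=4
 */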
2952 static int __init be_init_module(void)
2953 {
2954 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
2955 rx_frag_size != 2048) {
2956 printk(KERN_WARNING DRV_NAME
2957 " : Module param rx_frag_size must be 2048/4096/8192."
2958 " Using 2048\n");
2959 rx_frag_size = 2048;
2960 }
2961
2962 if (num_vfs > 32) {
2963 printk(KERN_WARNING DRV_NAME
2964 " : Module param num_vfs must not be greater than 32."
2965 "Using 32\n");
2966 num_vfs = 32;
2967 }
2968
2969 return pci_register_driver(&be_driver);
2970 }
2971 module_init(be_init_module);
2972
2973 static void __exit be_exit_module(void)
2974 {
2975 pci_unregister_driver(&be_driver);
2976 }
2977 module_exit(be_exit_module);