be2net: dynamically allocate adapter->vf_cfg
drivers/net/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static ushort rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, ushort, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
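/* Load-time tuning sketch (assuming the driver builds as be2net.ko):
 * "modprobe be2net num_vfs=4 rx_frag_size=4096" would ask the PF to enable
 * four SR-IOV virtual functions and use 4KB receive fragments. multi_rxq is
 * S_IWUSR, so it can also be toggled later through
 * /sys/module/be2net/parameters/multi_rxq.
 */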
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { 0 }
46 };
47 MODULE_DEVICE_TABLE(pci, be_dev_ids);
48 /* UE Status Low CSR */
49 static char *ue_status_low_desc[] = {
50 "CEV",
51 "CTX",
52 "DBUF",
53 "ERX",
54 "Host",
55 "MPU",
56 "NDMA",
57 "PTC ",
58 "RDMA ",
59 "RXF ",
60 "RXIPS ",
61 "RXULP0 ",
62 "RXULP1 ",
63 "RXULP2 ",
64 "TIM ",
65 "TPOST ",
66 "TPRE ",
67 "TXIPS ",
68 "TXULP0 ",
69 "TXULP1 ",
70 "UC ",
71 "WDMA ",
72 "TXULP2 ",
73 "HOST1 ",
74 "P0_OB_LINK ",
75 "P1_OB_LINK ",
76 "HOST_GPIO ",
77 "MBOX ",
78 "AXGMAC0",
79 "AXGMAC1",
80 "JTAG",
81 "MPU_INTPEND"
82 };
83 /* UE Status High CSR */
84 static char *ue_status_hi_desc[] = {
85 "LPCMEMHOST",
86 "MGMT_MAC",
87 "PCS0ONLINE",
88 "MPU_IRAM",
89 "PCS1ONLINE",
90 "PCTL0",
91 "PCTL1",
92 "PMEM",
93 "RR",
94 "TXPB",
95 "RXPP",
96 "XAUI",
97 "TXP",
98 "ARM",
99 "IPC",
100 "HOST2",
101 "HOST3",
102 "HOST4",
103 "HOST5",
104 "HOST6",
105 "HOST7",
106 "HOST8",
107 "HOST9",
108 "NETC"
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown"
117 };
118
119 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
120 {
121 struct be_dma_mem *mem = &q->dma_mem;
122 if (mem->va)
123 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
124 mem->dma);
125 }
126
127 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
128 u16 len, u16 entry_size)
129 {
130 struct be_dma_mem *mem = &q->dma_mem;
131
132 memset(q, 0, sizeof(*q));
133 q->len = len;
134 q->entry_size = entry_size;
135 mem->size = len * entry_size;
136 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
137 GFP_KERNEL);
138 if (!mem->va)
139 return -1;
140 memset(mem->va, 0, mem->size);
141 return 0;
142 }
143
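/* Enable/disable host interrupt delivery by toggling the HOSTINTR bit in the
 * PCICFG membar interrupt-control register; does nothing once an EEH error
 * has been flagged or when the requested state is already in effect.
 */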
144 static void be_intr_set(struct be_adapter *adapter, bool enable)
145 {
146 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
147 u32 reg = ioread32(addr);
148 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
149
150 if (adapter->eeh_err)
151 return;
152
153 if (!enabled && enable)
154 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 else if (enabled && !enable)
156 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
157 else
158 return;
159
160 iowrite32(reg, addr);
161 }
162
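/* RQ/TXQ doorbell helpers: each rings the corresponding doorbell with the
 * queue id and the number of newly posted entries. The wmb() makes sure the
 * descriptors written above are visible before hardware is told to fetch
 * them.
 */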
163 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
164 {
165 u32 val = 0;
166 val |= qid & DB_RQ_RING_ID_MASK;
167 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
168
169 wmb();
170 iowrite32(val, adapter->db + DB_RQ_OFFSET);
171 }
172
173 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
174 {
175 u32 val = 0;
176 val |= qid & DB_TXULP_RING_ID_MASK;
177 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
178
179 wmb();
180 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
181 }
182
183 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
184 bool arm, bool clear_int, u16 num_popped)
185 {
186 u32 val = 0;
187 val |= qid & DB_EQ_RING_ID_MASK;
188 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
189 DB_EQ_RING_ID_EXT_MASK_SHIFT);
190
191 if (adapter->eeh_err)
192 return;
193
194 if (arm)
195 val |= 1 << DB_EQ_REARM_SHIFT;
196 if (clear_int)
197 val |= 1 << DB_EQ_CLR_SHIFT;
198 val |= 1 << DB_EQ_EVNT_SHIFT;
199 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
200 iowrite32(val, adapter->db + DB_EQ_OFFSET);
201 }
202
203 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
204 {
205 u32 val = 0;
206 val |= qid & DB_CQ_RING_ID_MASK;
207 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
208 DB_CQ_RING_ID_EXT_MASK_SHIFT);
209
210 if (adapter->eeh_err)
211 return;
212
213 if (arm)
214 val |= 1 << DB_CQ_REARM_SHIFT;
215 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
216 iowrite32(val, adapter->db + DB_CQ_OFFSET);
217 }
218
219 static int be_mac_addr_set(struct net_device *netdev, void *p)
220 {
221 struct be_adapter *adapter = netdev_priv(netdev);
222 struct sockaddr *addr = p;
223 int status = 0;
224
225 if (!is_valid_ether_addr(addr->sa_data))
226 return -EADDRNOTAVAIL;
227
228 /* MAC addr configuration will be done in hardware for VFs
229 * by their corresponding PFs. Just copy to netdev addr here
230 */
231 if (!be_physfn(adapter))
232 goto netdev_addr;
233
234 status = be_cmd_pmac_del(adapter, adapter->if_handle,
235 adapter->pmac_id, 0);
236 if (status)
237 return status;
238
239 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
240 adapter->if_handle, &adapter->pmac_id, 0);
241 netdev_addr:
242 if (!status)
243 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
244
245 return status;
246 }
247
248 void netdev_stats_update(struct be_adapter *adapter)
249 {
250 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
251 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
252 struct be_port_rxf_stats *port_stats =
253 &rxf_stats->port[adapter->port_num];
254 struct net_device_stats *dev_stats = &adapter->netdev->stats;
255 struct be_erx_stats *erx_stats = &hw_stats->erx;
256 struct be_rx_obj *rxo;
257 int i;
258
259 memset(dev_stats, 0, sizeof(*dev_stats));
260 for_all_rx_queues(adapter, rxo, i) {
261 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
262 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
263 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
264 /* no space in linux buffers: best possible approximation */
265 dev_stats->rx_dropped +=
266 erx_stats->rx_drops_no_fragments[rxo->q.id];
267 }
268
269 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
270 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
271
272 /* bad pkts received */
273 dev_stats->rx_errors = port_stats->rx_crc_errors +
274 port_stats->rx_alignment_symbol_errors +
275 port_stats->rx_in_range_errors +
276 port_stats->rx_out_range_errors +
277 port_stats->rx_frame_too_long +
278 port_stats->rx_dropped_too_small +
279 port_stats->rx_dropped_too_short +
280 port_stats->rx_dropped_header_too_small +
281 port_stats->rx_dropped_tcp_length +
282 port_stats->rx_dropped_runt +
283 port_stats->rx_tcp_checksum_errs +
284 port_stats->rx_ip_checksum_errs +
285 port_stats->rx_udp_checksum_errs;
286
287 /* detailed rx errors */
288 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
289 port_stats->rx_out_range_errors +
290 port_stats->rx_frame_too_long;
291
292 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
293
294 /* frame alignment errors */
295 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
296
297 /* receiver fifo overrun */
298 /* drops_no_pbuf is not per i/f, it's per BE card */
299 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
300 port_stats->rx_input_fifo_overflow +
301 rxf_stats->rx_drops_no_pbuf;
302 }
303
304 void be_link_status_update(struct be_adapter *adapter, bool link_up)
305 {
306 struct net_device *netdev = adapter->netdev;
307
308 /* If link came up or went down */
309 if (adapter->link_up != link_up) {
310 adapter->link_speed = -1;
311 if (link_up) {
312 netif_carrier_on(netdev);
313 printk(KERN_INFO "%s: Link up\n", netdev->name);
314 } else {
315 netif_carrier_off(netdev);
316 printk(KERN_INFO "%s: Link down\n", netdev->name);
317 }
318 adapter->link_up = link_up;
319 }
320 }
321
322 /* Update the EQ delay in BE based on the RX frags consumed / sec */
323 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
324 {
325 struct be_eq_obj *rx_eq = &rxo->rx_eq;
326 struct be_rx_stats *stats = &rxo->stats;
327 ulong now = jiffies;
328 u32 eqd;
329
330 if (!rx_eq->enable_aic)
331 return;
332
333 /* Wrapped around */
334 if (time_before(now, stats->rx_fps_jiffies)) {
335 stats->rx_fps_jiffies = now;
336 return;
337 }
338
339 /* Update once a second */
340 if ((now - stats->rx_fps_jiffies) < HZ)
341 return;
342
343 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
344 ((now - stats->rx_fps_jiffies) / HZ);
345
346 stats->rx_fps_jiffies = now;
347 stats->prev_rx_frags = stats->rx_frags;
348 eqd = stats->rx_fps / 110000;
349 eqd = eqd << 3;
350 if (eqd > rx_eq->max_eqd)
351 eqd = rx_eq->max_eqd;
352 if (eqd < rx_eq->min_eqd)
353 eqd = rx_eq->min_eqd;
354 if (eqd < 10)
355 eqd = 0;
356 if (eqd != rx_eq->cur_eqd)
357 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
358
359 rx_eq->cur_eqd = eqd;
360 }
361
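/* Convert a byte count accumulated over 'ticks' jiffies into an approximate
 * rate in Mbps (bytes/sec -> bits/sec -> megabits/sec).
 */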
362 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
363 {
364 u64 rate = bytes;
365
366 do_div(rate, ticks / HZ);
367 rate <<= 3; /* bytes/sec -> bits/sec */
368 do_div(rate, 1000000ul); /* MB/Sec */
369
370 return rate;
371 }
372
373 static void be_tx_rate_update(struct be_adapter *adapter)
374 {
375 struct be_tx_stats *stats = tx_stats(adapter);
376 ulong now = jiffies;
377
378 /* Wrapped around? */
379 if (time_before(now, stats->be_tx_jiffies)) {
380 stats->be_tx_jiffies = now;
381 return;
382 }
383
384 /* Update tx rate once in two seconds */
385 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
386 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
387 - stats->be_tx_bytes_prev,
388 now - stats->be_tx_jiffies);
389 stats->be_tx_jiffies = now;
390 stats->be_tx_bytes_prev = stats->be_tx_bytes;
391 }
392 }
393
394 static void be_tx_stats_update(struct be_adapter *adapter,
395 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
396 {
397 struct be_tx_stats *stats = tx_stats(adapter);
398 stats->be_tx_reqs++;
399 stats->be_tx_wrbs += wrb_cnt;
400 stats->be_tx_bytes += copied;
401 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
402 if (stopped)
403 stats->be_tx_stops++;
404 }
405
406 /* Determine number of WRB entries needed to xmit data in an skb */
407 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
408 bool *dummy)
409 {
410 int cnt = (skb->len > skb->data_len);
411
412 cnt += skb_shinfo(skb)->nr_frags;
413
414 /* to account for hdr wrb */
415 cnt++;
416 if (lancer_chip(adapter) || !(cnt & 1)) {
417 *dummy = false;
418 } else {
419 /* add a dummy to make it an even num */
420 cnt++;
421 *dummy = true;
422 }
423 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
424 return cnt;
425 }
426
427 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
428 {
429 wrb->frag_pa_hi = upper_32_bits(addr);
430 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
431 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
432 }
433
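/* Fill the header WRB that precedes the data WRBs of a transmit: LSO and
 * checksum-offload flags, optional VLAN tag insertion (with the priority
 * remapped if the OS-supplied priority is not in the available bitmap), the
 * total WRB count and the payload length.
 */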
434 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
435 struct sk_buff *skb, u32 wrb_cnt, u32 len)
436 {
437 u8 vlan_prio = 0;
438 u16 vlan_tag = 0;
439
440 memset(hdr, 0, sizeof(*hdr));
441
442 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
443
444 if (skb_is_gso(skb)) {
445 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
446 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
447 hdr, skb_shinfo(skb)->gso_size);
448 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
449 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
450 if (lancer_chip(adapter) && adapter->sli_family ==
451 LANCER_A0_SLI_FAMILY) {
452 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
453 if (is_tcp_pkt(skb))
454 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
455 tcpcs, hdr, 1);
456 else if (is_udp_pkt(skb))
457 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
458 udpcs, hdr, 1);
459 }
460 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
461 if (is_tcp_pkt(skb))
462 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
463 else if (is_udp_pkt(skb))
464 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
465 }
466
467 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
468 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
469 vlan_tag = vlan_tx_tag_get(skb);
470 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
471 /* If vlan priority provided by OS is NOT in available bmap */
472 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
473 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
474 adapter->recommended_prio;
475 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
476 }
477
478 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
479 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
480 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
481 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
482 }
483
484 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
485 bool unmap_single)
486 {
487 dma_addr_t dma;
488
489 be_dws_le_to_cpu(wrb, sizeof(*wrb));
490
491 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
492 if (wrb->frag_len) {
493 if (unmap_single)
494 dma_unmap_single(dev, dma, wrb->frag_len,
495 DMA_TO_DEVICE);
496 else
497 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
498 }
499 }
500
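/* Map the skb head and its page frags into data WRBs on the TX ring, append
 * a dummy WRB when wrb_cnt_for_skb() asked for one to keep the count even,
 * then fill the header WRB reserved at the front. On a DMA mapping error the
 * ring head is rewound, everything mapped so far is unmapped and 0 is
 * returned.
 */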
501 static int make_tx_wrbs(struct be_adapter *adapter,
502 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
503 {
504 dma_addr_t busaddr;
505 int i, copied = 0;
506 struct device *dev = &adapter->pdev->dev;
507 struct sk_buff *first_skb = skb;
508 struct be_queue_info *txq = &adapter->tx_obj.q;
509 struct be_eth_wrb *wrb;
510 struct be_eth_hdr_wrb *hdr;
511 bool map_single = false;
512 u16 map_head;
513
514 hdr = queue_head_node(txq);
515 queue_head_inc(txq);
516 map_head = txq->head;
517
518 if (skb->len > skb->data_len) {
519 int len = skb_headlen(skb);
520 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
521 if (dma_mapping_error(dev, busaddr))
522 goto dma_err;
523 map_single = true;
524 wrb = queue_head_node(txq);
525 wrb_fill(wrb, busaddr, len);
526 be_dws_cpu_to_le(wrb, sizeof(*wrb));
527 queue_head_inc(txq);
528 copied += len;
529 }
530
531 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
532 struct skb_frag_struct *frag =
533 &skb_shinfo(skb)->frags[i];
534 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
535 frag->size, DMA_TO_DEVICE);
536 if (dma_mapping_error(dev, busaddr))
537 goto dma_err;
538 wrb = queue_head_node(txq);
539 wrb_fill(wrb, busaddr, frag->size);
540 be_dws_cpu_to_le(wrb, sizeof(*wrb));
541 queue_head_inc(txq);
542 copied += frag->size;
543 }
544
545 if (dummy_wrb) {
546 wrb = queue_head_node(txq);
547 wrb_fill(wrb, 0, 0);
548 be_dws_cpu_to_le(wrb, sizeof(*wrb));
549 queue_head_inc(txq);
550 }
551
552 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
553 be_dws_cpu_to_le(hdr, sizeof(*hdr));
554
555 return copied;
556 dma_err:
557 txq->head = map_head;
558 while (copied) {
559 wrb = queue_head_node(txq);
560 unmap_tx_frag(dev, wrb, map_single);
561 map_single = false;
562 copied -= wrb->frag_len;
563 queue_head_inc(txq);
564 }
565 return 0;
566 }
567
568 static netdev_tx_t be_xmit(struct sk_buff *skb,
569 struct net_device *netdev)
570 {
571 struct be_adapter *adapter = netdev_priv(netdev);
572 struct be_tx_obj *tx_obj = &adapter->tx_obj;
573 struct be_queue_info *txq = &tx_obj->q;
574 u32 wrb_cnt = 0, copied = 0;
575 u32 start = txq->head;
576 bool dummy_wrb, stopped = false;
577
578 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
579
580 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
581 if (copied) {
582 /* record the sent skb in the sent_skb table */
583 BUG_ON(tx_obj->sent_skb_list[start]);
584 tx_obj->sent_skb_list[start] = skb;
585
586 /* Ensure txq has space for the next skb; Else stop the queue
587 * *BEFORE* ringing the tx doorbell, so that we serialize the
588 * tx compls of the current transmit which'll wake up the queue
589 */
590 atomic_add(wrb_cnt, &txq->used);
591 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
592 txq->len) {
593 netif_stop_queue(netdev);
594 stopped = true;
595 }
596
597 be_txq_notify(adapter, txq->id, wrb_cnt);
598
599 be_tx_stats_update(adapter, wrb_cnt, copied,
600 skb_shinfo(skb)->gso_segs, stopped);
601 } else {
602 txq->head = start;
603 dev_kfree_skb_any(skb);
604 }
605 return NETDEV_TX_OK;
606 }
607
608 static int be_change_mtu(struct net_device *netdev, int new_mtu)
609 {
610 struct be_adapter *adapter = netdev_priv(netdev);
611 if (new_mtu < BE_MIN_MTU ||
612 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
613 (ETH_HLEN + ETH_FCS_LEN))) {
614 dev_info(&adapter->pdev->dev,
615 "MTU must be between %d and %d bytes\n",
616 BE_MIN_MTU,
617 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
618 return -EINVAL;
619 }
620 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
621 netdev->mtu, new_mtu);
622 netdev->mtu = new_mtu;
623 return 0;
624 }
625
626 /*
627 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
628 * If the user configures more, place BE in vlan promiscuous mode.
629 */
630 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
631 {
632 u16 vtag[BE_NUM_VLANS_SUPPORTED];
633 u16 ntags = 0, i;
634 int status = 0;
635 u32 if_handle;
636
637 if (vf) {
638 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
639 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
640 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
641 }
642
643 if (adapter->vlans_added <= adapter->max_vlans) {
644 /* Construct VLAN Table to give to HW */
645 for (i = 0; i < VLAN_N_VID; i++) {
646 if (adapter->vlan_tag[i]) {
647 vtag[ntags] = cpu_to_le16(i);
648 ntags++;
649 }
650 }
651 status = be_cmd_vlan_config(adapter, adapter->if_handle,
652 vtag, ntags, 1, 0);
653 } else {
654 status = be_cmd_vlan_config(adapter, adapter->if_handle,
655 NULL, 0, 1, 1);
656 }
657
658 return status;
659 }
660
661 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
662 {
663 struct be_adapter *adapter = netdev_priv(netdev);
664
665 adapter->vlan_grp = grp;
666 }
667
668 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
669 {
670 struct be_adapter *adapter = netdev_priv(netdev);
671
672 adapter->vlans_added++;
673 if (!be_physfn(adapter))
674 return;
675
676 adapter->vlan_tag[vid] = 1;
677 if (adapter->vlans_added <= (adapter->max_vlans + 1))
678 be_vid_config(adapter, false, 0);
679 }
680
681 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
682 {
683 struct be_adapter *adapter = netdev_priv(netdev);
684
685 adapter->vlans_added--;
686 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
687
688 if (!be_physfn(adapter))
689 return;
690
691 adapter->vlan_tag[vid] = 0;
692 if (adapter->vlans_added <= adapter->max_vlans)
693 be_vid_config(adapter, false, 0);
694 }
695
696 static void be_set_multicast_list(struct net_device *netdev)
697 {
698 struct be_adapter *adapter = netdev_priv(netdev);
699
700 if (netdev->flags & IFF_PROMISC) {
701 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
702 adapter->promiscuous = true;
703 goto done;
704 }
705
706 /* BE was previously in promiscuous mode; disable it */
707 if (adapter->promiscuous) {
708 adapter->promiscuous = false;
709 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
710 }
711
712 /* Enable multicast promisc if num configured exceeds what we support */
713 if (netdev->flags & IFF_ALLMULTI ||
714 netdev_mc_count(netdev) > BE_MAX_MC) {
715 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
716 &adapter->mc_cmd_mem);
717 goto done;
718 }
719
720 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
721 &adapter->mc_cmd_mem);
722 done:
723 return;
724 }
725
726 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
727 {
728 struct be_adapter *adapter = netdev_priv(netdev);
729 int status;
730
731 if (!adapter->sriov_enabled)
732 return -EPERM;
733
734 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
735 return -EINVAL;
736
737 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
738 status = be_cmd_pmac_del(adapter,
739 adapter->vf_cfg[vf].vf_if_handle,
740 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
741
742 status = be_cmd_pmac_add(adapter, mac,
743 adapter->vf_cfg[vf].vf_if_handle,
744 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
745
746 if (status)
747 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
748 mac, vf);
749 else
750 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
751
752 return status;
753 }
754
755 static int be_get_vf_config(struct net_device *netdev, int vf,
756 struct ifla_vf_info *vi)
757 {
758 struct be_adapter *adapter = netdev_priv(netdev);
759
760 if (!adapter->sriov_enabled)
761 return -EPERM;
762
763 if (vf >= num_vfs)
764 return -EINVAL;
765
766 vi->vf = vf;
767 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
768 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
769 vi->qos = 0;
770 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
771
772 return 0;
773 }
774
775 static int be_set_vf_vlan(struct net_device *netdev,
776 int vf, u16 vlan, u8 qos)
777 {
778 struct be_adapter *adapter = netdev_priv(netdev);
779 int status = 0;
780
781 if (!adapter->sriov_enabled)
782 return -EPERM;
783
784 if ((vf >= num_vfs) || (vlan > 4095))
785 return -EINVAL;
786
787 if (vlan) {
788 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
789 adapter->vlans_added++;
790 } else {
791 adapter->vf_cfg[vf].vf_vlan_tag = 0;
792 adapter->vlans_added--;
793 }
794
795 status = be_vid_config(adapter, true, vf);
796
797 if (status)
798 dev_info(&adapter->pdev->dev,
799 "VLAN %d config on VF %d failed\n", vlan, vf);
800 return status;
801 }
802
803 static int be_set_vf_tx_rate(struct net_device *netdev,
804 int vf, int rate)
805 {
806 struct be_adapter *adapter = netdev_priv(netdev);
807 int status = 0;
808
809 if (!adapter->sriov_enabled)
810 return -EPERM;
811
812 if ((vf >= num_vfs) || (rate < 0))
813 return -EINVAL;
814
815 if (rate > 10000)
816 rate = 10000;
817
818 adapter->vf_cfg[vf].vf_tx_rate = rate;
819 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
820
821 if (status)
822 dev_info(&adapter->pdev->dev,
823 "tx rate %d on VF %d failed\n", rate, vf);
824 return status;
825 }
826
827 static void be_rx_rate_update(struct be_rx_obj *rxo)
828 {
829 struct be_rx_stats *stats = &rxo->stats;
830 ulong now = jiffies;
831
832 /* Wrapped around */
833 if (time_before(now, stats->rx_jiffies)) {
834 stats->rx_jiffies = now;
835 return;
836 }
837
838 /* Update the rate once in two seconds */
839 if ((now - stats->rx_jiffies) < 2 * HZ)
840 return;
841
842 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
843 now - stats->rx_jiffies);
844 stats->rx_jiffies = now;
845 stats->rx_bytes_prev = stats->rx_bytes;
846 }
847
848 static void be_rx_stats_update(struct be_rx_obj *rxo,
849 struct be_rx_compl_info *rxcp)
850 {
851 struct be_rx_stats *stats = &rxo->stats;
852
853 stats->rx_compl++;
854 stats->rx_frags += rxcp->num_rcvd;
855 stats->rx_bytes += rxcp->pkt_size;
856 stats->rx_pkts++;
857 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
858 stats->rx_mcast_pkts++;
859 if (rxcp->err)
860 stats->rxcp_err++;
861 }
862
863 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
864 {
865 /* L4 checksum is not reliable for non TCP/UDP packets.
866 * Also ignore ipcksm for ipv6 pkts */
867 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
868 (rxcp->ip_csum || rxcp->ipv6);
869 }
870
871 static struct be_rx_page_info *
872 get_rx_page_info(struct be_adapter *adapter,
873 struct be_rx_obj *rxo,
874 u16 frag_idx)
875 {
876 struct be_rx_page_info *rx_page_info;
877 struct be_queue_info *rxq = &rxo->q;
878
879 rx_page_info = &rxo->page_info_tbl[frag_idx];
880 BUG_ON(!rx_page_info->page);
881
882 if (rx_page_info->last_page_user) {
883 dma_unmap_page(&adapter->pdev->dev,
884 dma_unmap_addr(rx_page_info, bus),
885 adapter->big_page_size, DMA_FROM_DEVICE);
886 rx_page_info->last_page_user = false;
887 }
888
889 atomic_dec(&rxq->used);
890 return rx_page_info;
891 }
892
893 /* Throw away the data in the Rx completion */
894 static void be_rx_compl_discard(struct be_adapter *adapter,
895 struct be_rx_obj *rxo,
896 struct be_rx_compl_info *rxcp)
897 {
898 struct be_queue_info *rxq = &rxo->q;
899 struct be_rx_page_info *page_info;
900 u16 i, num_rcvd = rxcp->num_rcvd;
901
902 for (i = 0; i < num_rcvd; i++) {
903 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
904 put_page(page_info->page);
905 memset(page_info, 0, sizeof(*page_info));
906 index_inc(&rxcp->rxq_idx, rxq->len);
907 }
908 }
909
910 /*
911 * skb_fill_rx_data forms a complete skb for an ether frame
912 * indicated by rxcp.
913 */
914 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
915 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
916 {
917 struct be_queue_info *rxq = &rxo->q;
918 struct be_rx_page_info *page_info;
919 u16 i, j;
920 u16 hdr_len, curr_frag_len, remaining;
921 u8 *start;
922
923 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
924 start = page_address(page_info->page) + page_info->page_offset;
925 prefetch(start);
926
927 /* Copy data in the first descriptor of this completion */
928 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
929
930 /* Copy the header portion into skb_data */
931 hdr_len = min(BE_HDR_LEN, curr_frag_len);
932 memcpy(skb->data, start, hdr_len);
933 skb->len = curr_frag_len;
934 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
935 /* Complete packet has now been moved to data */
936 put_page(page_info->page);
937 skb->data_len = 0;
938 skb->tail += curr_frag_len;
939 } else {
940 skb_shinfo(skb)->nr_frags = 1;
941 skb_shinfo(skb)->frags[0].page = page_info->page;
942 skb_shinfo(skb)->frags[0].page_offset =
943 page_info->page_offset + hdr_len;
944 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
945 skb->data_len = curr_frag_len - hdr_len;
946 skb->tail += hdr_len;
947 }
948 page_info->page = NULL;
949
950 if (rxcp->pkt_size <= rx_frag_size) {
951 BUG_ON(rxcp->num_rcvd != 1);
952 return;
953 }
954
955 /* More frags present for this completion */
956 index_inc(&rxcp->rxq_idx, rxq->len);
957 remaining = rxcp->pkt_size - curr_frag_len;
958 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
959 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
960 curr_frag_len = min(remaining, rx_frag_size);
961
962 /* Coalesce all frags from the same physical page in one slot */
963 if (page_info->page_offset == 0) {
964 /* Fresh page */
965 j++;
966 skb_shinfo(skb)->frags[j].page = page_info->page;
967 skb_shinfo(skb)->frags[j].page_offset =
968 page_info->page_offset;
969 skb_shinfo(skb)->frags[j].size = 0;
970 skb_shinfo(skb)->nr_frags++;
971 } else {
972 put_page(page_info->page);
973 }
974
975 skb_shinfo(skb)->frags[j].size += curr_frag_len;
976 skb->len += curr_frag_len;
977 skb->data_len += curr_frag_len;
978
979 remaining -= curr_frag_len;
980 index_inc(&rxcp->rxq_idx, rxq->len);
981 page_info->page = NULL;
982 }
983 BUG_ON(j > MAX_SKB_FRAGS);
984 }
985
986 /* Process the RX completion indicated by rxcp when GRO is disabled */
987 static void be_rx_compl_process(struct be_adapter *adapter,
988 struct be_rx_obj *rxo,
989 struct be_rx_compl_info *rxcp)
990 {
991 struct sk_buff *skb;
992
993 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
994 if (unlikely(!skb)) {
995 if (net_ratelimit())
996 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
997 be_rx_compl_discard(adapter, rxo, rxcp);
998 return;
999 }
1000
1001 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1002
1003 if (likely(adapter->rx_csum && csum_passed(rxcp)))
1004 skb->ip_summed = CHECKSUM_UNNECESSARY;
1005 else
1006 skb_checksum_none_assert(skb);
1007
1008 skb->truesize = skb->len + sizeof(struct sk_buff);
1009 skb->protocol = eth_type_trans(skb, adapter->netdev);
1010 if (adapter->netdev->features & NETIF_F_RXHASH)
1011 skb->rxhash = rxcp->rss_hash;
1012
1014 if (unlikely(rxcp->vlanf)) {
1015 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1016 kfree_skb(skb);
1017 return;
1018 }
1019 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
1020 } else {
1021 netif_receive_skb(skb);
1022 }
1023 }
1024
1025 /* Process the RX completion indicated by rxcp when GRO is enabled */
1026 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1027 struct be_rx_obj *rxo,
1028 struct be_rx_compl_info *rxcp)
1029 {
1030 struct be_rx_page_info *page_info;
1031 struct sk_buff *skb = NULL;
1032 struct be_queue_info *rxq = &rxo->q;
1033 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1034 u16 remaining, curr_frag_len;
1035 u16 i, j;
1036
1037 skb = napi_get_frags(&eq_obj->napi);
1038 if (!skb) {
1039 be_rx_compl_discard(adapter, rxo, rxcp);
1040 return;
1041 }
1042
1043 remaining = rxcp->pkt_size;
1044 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1045 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1046
1047 curr_frag_len = min(remaining, rx_frag_size);
1048
1049 /* Coalesce all frags from the same physical page in one slot */
1050 if (i == 0 || page_info->page_offset == 0) {
1051 /* First frag or Fresh page */
1052 j++;
1053 skb_shinfo(skb)->frags[j].page = page_info->page;
1054 skb_shinfo(skb)->frags[j].page_offset =
1055 page_info->page_offset;
1056 skb_shinfo(skb)->frags[j].size = 0;
1057 } else {
1058 put_page(page_info->page);
1059 }
1060 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1061
1062 remaining -= curr_frag_len;
1063 index_inc(&rxcp->rxq_idx, rxq->len);
1064 memset(page_info, 0, sizeof(*page_info));
1065 }
1066 BUG_ON(j > MAX_SKB_FRAGS);
1067
1068 skb_shinfo(skb)->nr_frags = j + 1;
1069 skb->len = rxcp->pkt_size;
1070 skb->data_len = rxcp->pkt_size;
1071 skb->truesize += rxcp->pkt_size;
1072 skb->ip_summed = CHECKSUM_UNNECESSARY;
1073 if (adapter->netdev->features & NETIF_F_RXHASH)
1074 skb->rxhash = rxcp->rss_hash;
1075
1076 if (likely(!rxcp->vlanf))
1077 napi_gro_frags(&eq_obj->napi);
1078 else
1079 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
1080 }
1081
1082 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1083 struct be_eth_rx_compl *compl,
1084 struct be_rx_compl_info *rxcp)
1085 {
1086 rxcp->pkt_size =
1087 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1088 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1089 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1090 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1091 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1092 rxcp->ip_csum =
1093 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1094 rxcp->l4_csum =
1095 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1096 rxcp->ipv6 =
1097 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1098 rxcp->rxq_idx =
1099 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1100 rxcp->num_rcvd =
1101 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1102 rxcp->pkt_type =
1103 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1104 rxcp->rss_hash =
1105 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1106 if (rxcp->vlanf) {
1107 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1108 compl);
1109 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1110 compl);
1111 }
1112 }
1113
1114 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1115 struct be_eth_rx_compl *compl,
1116 struct be_rx_compl_info *rxcp)
1117 {
1118 rxcp->pkt_size =
1119 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1120 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1121 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1122 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1123 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1124 rxcp->ip_csum =
1125 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1126 rxcp->l4_csum =
1127 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1128 rxcp->ipv6 =
1129 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1130 rxcp->rxq_idx =
1131 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1132 rxcp->num_rcvd =
1133 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1134 rxcp->pkt_type =
1135 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1136 rxcp->rss_hash =
1137 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1138 if (rxcp->vlanf) {
1139 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1140 compl);
1141 rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1142 compl);
1143 }
1144 }
1145
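/* Return the next valid RX completion, parsed into rxo->rxcp, or NULL if
 * none is pending. Handles both the v0 and the BE3-native v1 completion
 * layouts and clears spurious VLAN indications before handing the entry back
 * to the caller.
 */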
1146 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1147 {
1148 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1149 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1150 struct be_adapter *adapter = rxo->adapter;
1151
1152 /* For checking the valid bit it is Ok to use either definition as the
1153 * valid bit is at the same position in both v0 and v1 Rx compl */
1154 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1155 return NULL;
1156
1157 rmb();
1158 be_dws_le_to_cpu(compl, sizeof(*compl));
1159
1160 if (adapter->be3_native)
1161 be_parse_rx_compl_v1(adapter, compl, rxcp);
1162 else
1163 be_parse_rx_compl_v0(adapter, compl, rxcp);
1164
1165 if (rxcp->vlanf) {
1166 /* vlanf could be wrongly set in some cards.
1167 * ignore if vtm is not set */
1168 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1169 rxcp->vlanf = 0;
1170
1171 if (!lancer_chip(adapter))
1172 rxcp->vid = swab16(rxcp->vid);
1173
1174 if ((adapter->pvid == rxcp->vid) &&
1175 !adapter->vlan_tag[rxcp->vid])
1176 rxcp->vlanf = 0;
1177 }
1178
1179 /* As the compl has been parsed, reset it; we won't touch it again */
1180 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1181
1182 queue_tail_inc(&rxo->cq);
1183 return rxcp;
1184 }
1185
1186 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1187 {
1188 u32 order = get_order(size);
1189
1190 if (order > 0)
1191 gfp |= __GFP_COMP;
1192 return alloc_pages(gfp, order);
1193 }
1194
1195 /*
1196 * Allocate a page, split it to fragments of size rx_frag_size and post as
1197 * receive buffers to BE
1198 */
1199 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1200 {
1201 struct be_adapter *adapter = rxo->adapter;
1202 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1203 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1204 struct be_queue_info *rxq = &rxo->q;
1205 struct page *pagep = NULL;
1206 struct be_eth_rx_d *rxd;
1207 u64 page_dmaaddr = 0, frag_dmaaddr;
1208 u32 posted, page_offset = 0;
1209
1210 page_info = &rxo->page_info_tbl[rxq->head];
1211 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1212 if (!pagep) {
1213 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1214 if (unlikely(!pagep)) {
1215 rxo->stats.rx_post_fail++;
1216 break;
1217 }
1218 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1219 0, adapter->big_page_size,
1220 DMA_FROM_DEVICE);
1221 page_info->page_offset = 0;
1222 } else {
1223 get_page(pagep);
1224 page_info->page_offset = page_offset + rx_frag_size;
1225 }
1226 page_offset = page_info->page_offset;
1227 page_info->page = pagep;
1228 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1229 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1230
1231 rxd = queue_head_node(rxq);
1232 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1233 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1234
1235 /* Any space left in the current big page for another frag? */
1236 if ((page_offset + rx_frag_size + rx_frag_size) >
1237 adapter->big_page_size) {
1238 pagep = NULL;
1239 page_info->last_page_user = true;
1240 }
1241
1242 prev_page_info = page_info;
1243 queue_head_inc(rxq);
1244 page_info = &page_info_tbl[rxq->head];
1245 }
1246 if (pagep)
1247 prev_page_info->last_page_user = true;
1248
1249 if (posted) {
1250 atomic_add(posted, &rxq->used);
1251 be_rxq_notify(adapter, rxq->id, posted);
1252 } else if (atomic_read(&rxq->used) == 0) {
1253 /* Let be_worker replenish when memory is available */
1254 rxo->rx_post_starved = true;
1255 }
1256 }
1257
1258 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1259 {
1260 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1261
1262 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1263 return NULL;
1264
1265 rmb();
1266 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1267
1268 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1269
1270 queue_tail_inc(tx_cq);
1271 return txcp;
1272 }
1273
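/* Reclaim one transmitted skb: walk the TX ring from the current tail up to
 * last_index, unmapping each fragment (the linear header buffer only once),
 * then release the consumed WRB slots and free the skb.
 */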
1274 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1275 {
1276 struct be_queue_info *txq = &adapter->tx_obj.q;
1277 struct be_eth_wrb *wrb;
1278 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1279 struct sk_buff *sent_skb;
1280 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1281 bool unmap_skb_hdr = true;
1282
1283 sent_skb = sent_skbs[txq->tail];
1284 BUG_ON(!sent_skb);
1285 sent_skbs[txq->tail] = NULL;
1286
1287 /* skip header wrb */
1288 queue_tail_inc(txq);
1289
1290 do {
1291 cur_index = txq->tail;
1292 wrb = queue_tail_node(txq);
1293 unmap_tx_frag(&adapter->pdev->dev, wrb,
1294 (unmap_skb_hdr && skb_headlen(sent_skb)));
1295 unmap_skb_hdr = false;
1296
1297 num_wrbs++;
1298 queue_tail_inc(txq);
1299 } while (cur_index != last_index);
1300
1301 atomic_sub(num_wrbs, &txq->used);
1302
1303 kfree_skb(sent_skb);
1304 }
1305
1306 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1307 {
1308 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1309
1310 if (!eqe->evt)
1311 return NULL;
1312
1313 rmb();
1314 eqe->evt = le32_to_cpu(eqe->evt);
1315 queue_tail_inc(&eq_obj->q);
1316 return eqe;
1317 }
1318
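/* Drain all pending entries from the event queue, re-arm it (also clearing
 * the interrupt) and schedule NAPI if any events were actually found.
 * Returns the number of events consumed.
 */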
1319 static int event_handle(struct be_adapter *adapter,
1320 struct be_eq_obj *eq_obj)
1321 {
1322 struct be_eq_entry *eqe;
1323 u16 num = 0;
1324
1325 while ((eqe = event_get(eq_obj)) != NULL) {
1326 eqe->evt = 0;
1327 num++;
1328 }
1329
1330 /* Deal with any spurious interrupts that come
1331 * without events
1332 */
1333 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1334 if (num)
1335 napi_schedule(&eq_obj->napi);
1336
1337 return num;
1338 }
1339
1340 /* Just read and notify events without processing them.
1341 * Used at the time of destroying event queues */
1342 static void be_eq_clean(struct be_adapter *adapter,
1343 struct be_eq_obj *eq_obj)
1344 {
1345 struct be_eq_entry *eqe;
1346 u16 num = 0;
1347
1348 while ((eqe = event_get(eq_obj)) != NULL) {
1349 eqe->evt = 0;
1350 num++;
1351 }
1352
1353 if (num)
1354 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1355 }
1356
1357 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1358 {
1359 struct be_rx_page_info *page_info;
1360 struct be_queue_info *rxq = &rxo->q;
1361 struct be_queue_info *rx_cq = &rxo->cq;
1362 struct be_rx_compl_info *rxcp;
1363 u16 tail;
1364
1365 /* First cleanup pending rx completions */
1366 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1367 be_rx_compl_discard(adapter, rxo, rxcp);
1368 be_cq_notify(adapter, rx_cq->id, false, 1);
1369 }
1370
1371 /* Then free posted rx buffers that were not used */
1372 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1373 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1374 page_info = get_rx_page_info(adapter, rxo, tail);
1375 put_page(page_info->page);
1376 memset(page_info, 0, sizeof(*page_info));
1377 }
1378 BUG_ON(atomic_read(&rxq->used));
1379 }
1380
1381 static void be_tx_compl_clean(struct be_adapter *adapter)
1382 {
1383 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1384 struct be_queue_info *txq = &adapter->tx_obj.q;
1385 struct be_eth_tx_compl *txcp;
1386 u16 end_idx, cmpl = 0, timeo = 0;
1387 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1388 struct sk_buff *sent_skb;
1389 bool dummy_wrb;
1390
1391 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1392 do {
1393 while ((txcp = be_tx_compl_get(tx_cq))) {
1394 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1395 wrb_index, txcp);
1396 be_tx_compl_process(adapter, end_idx);
1397 cmpl++;
1398 }
1399 if (cmpl) {
1400 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1401 cmpl = 0;
1402 }
1403
1404 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1405 break;
1406
1407 mdelay(1);
1408 } while (true);
1409
1410 if (atomic_read(&txq->used))
1411 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1412 atomic_read(&txq->used));
1413
1414 /* free posted tx for which compls will never arrive */
1415 while (atomic_read(&txq->used)) {
1416 sent_skb = sent_skbs[txq->tail];
1417 end_idx = txq->tail;
1418 index_adv(&end_idx,
1419 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1420 txq->len);
1421 be_tx_compl_process(adapter, end_idx);
1422 }
1423 }
1424
1425 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1426 {
1427 struct be_queue_info *q;
1428
1429 q = &adapter->mcc_obj.q;
1430 if (q->created)
1431 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1432 be_queue_free(adapter, q);
1433
1434 q = &adapter->mcc_obj.cq;
1435 if (q->created)
1436 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1437 be_queue_free(adapter, q);
1438 }
1439
1440 /* Must be called only after TX qs are created as MCC shares TX EQ */
1441 static int be_mcc_queues_create(struct be_adapter *adapter)
1442 {
1443 struct be_queue_info *q, *cq;
1444
1445 /* Alloc MCC compl queue */
1446 cq = &adapter->mcc_obj.cq;
1447 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1448 sizeof(struct be_mcc_compl)))
1449 goto err;
1450
1451 /* Ask BE to create MCC compl queue; share TX's eq */
1452 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1453 goto mcc_cq_free;
1454
1455 /* Alloc MCC queue */
1456 q = &adapter->mcc_obj.q;
1457 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1458 goto mcc_cq_destroy;
1459
1460 /* Ask BE to create MCC queue */
1461 if (be_cmd_mccq_create(adapter, q, cq))
1462 goto mcc_q_free;
1463
1464 return 0;
1465
1466 mcc_q_free:
1467 be_queue_free(adapter, q);
1468 mcc_cq_destroy:
1469 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1470 mcc_cq_free:
1471 be_queue_free(adapter, cq);
1472 err:
1473 return -1;
1474 }
1475
1476 static void be_tx_queues_destroy(struct be_adapter *adapter)
1477 {
1478 struct be_queue_info *q;
1479
1480 q = &adapter->tx_obj.q;
1481 if (q->created)
1482 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1483 be_queue_free(adapter, q);
1484
1485 q = &adapter->tx_obj.cq;
1486 if (q->created)
1487 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1488 be_queue_free(adapter, q);
1489
1490 /* Clear any residual events */
1491 be_eq_clean(adapter, &adapter->tx_eq);
1492
1493 q = &adapter->tx_eq.q;
1494 if (q->created)
1495 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1496 be_queue_free(adapter, q);
1497 }
1498
1499 static int be_tx_queues_create(struct be_adapter *adapter)
1500 {
1501 struct be_queue_info *eq, *q, *cq;
1502
1503 adapter->tx_eq.max_eqd = 0;
1504 adapter->tx_eq.min_eqd = 0;
1505 adapter->tx_eq.cur_eqd = 96;
1506 adapter->tx_eq.enable_aic = false;
1507 /* Alloc Tx Event queue */
1508 eq = &adapter->tx_eq.q;
1509 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1510 return -1;
1511
1512 /* Ask BE to create Tx Event queue */
1513 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1514 goto tx_eq_free;
1515
1516 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1517
1519 /* Alloc TX eth compl queue */
1520 cq = &adapter->tx_obj.cq;
1521 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1522 sizeof(struct be_eth_tx_compl)))
1523 goto tx_eq_destroy;
1524
1525 /* Ask BE to create Tx eth compl queue */
1526 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1527 goto tx_cq_free;
1528
1529 /* Alloc TX eth queue */
1530 q = &adapter->tx_obj.q;
1531 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1532 goto tx_cq_destroy;
1533
1534 /* Ask BE to create Tx eth queue */
1535 if (be_cmd_txq_create(adapter, q, cq))
1536 goto tx_q_free;
1537 return 0;
1538
1539 tx_q_free:
1540 be_queue_free(adapter, q);
1541 tx_cq_destroy:
1542 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1543 tx_cq_free:
1544 be_queue_free(adapter, cq);
1545 tx_eq_destroy:
1546 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1547 tx_eq_free:
1548 be_queue_free(adapter, eq);
1549 return -1;
1550 }
1551
1552 static void be_rx_queues_destroy(struct be_adapter *adapter)
1553 {
1554 struct be_queue_info *q;
1555 struct be_rx_obj *rxo;
1556 int i;
1557
1558 for_all_rx_queues(adapter, rxo, i) {
1559 q = &rxo->q;
1560 if (q->created) {
1561 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1562 /* After the rxq is invalidated, wait for a grace time
1563 * of 1ms for all dma to end and the flush compl to
1564 * arrive
1565 */
1566 mdelay(1);
1567 be_rx_q_clean(adapter, rxo);
1568 }
1569 be_queue_free(adapter, q);
1570
1571 q = &rxo->cq;
1572 if (q->created)
1573 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1574 be_queue_free(adapter, q);
1575
1576 /* Clear any residual events */
1577 q = &rxo->rx_eq.q;
1578 if (q->created) {
1579 be_eq_clean(adapter, &rxo->rx_eq);
1580 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1581 }
1582 be_queue_free(adapter, q);
1583 }
1584 }
1585
1586 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1587 {
1588 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1589 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1590 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1591 } else {
1592 dev_warn(&adapter->pdev->dev,
1593 "No support for multiple RX queues\n");
1594 return 1;
1595 }
1596 }
1597
1598 static int be_rx_queues_create(struct be_adapter *adapter)
1599 {
1600 struct be_queue_info *eq, *q, *cq;
1601 struct be_rx_obj *rxo;
1602 int rc, i;
1603
1604 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1605 msix_enabled(adapter) ?
1606 adapter->num_msix_vec - 1 : 1);
1607 if (adapter->num_rx_qs != MAX_RX_QS)
1608 dev_warn(&adapter->pdev->dev,
1609 "Can create only %d RX queues", adapter->num_rx_qs);
1610
1611 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1612 for_all_rx_queues(adapter, rxo, i) {
1613 rxo->adapter = adapter;
1614 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1615 rxo->rx_eq.enable_aic = true;
1616
1617 /* EQ */
1618 eq = &rxo->rx_eq.q;
1619 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1620 sizeof(struct be_eq_entry));
1621 if (rc)
1622 goto err;
1623
1624 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1625 if (rc)
1626 goto err;
1627
1628 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1629
1630 /* CQ */
1631 cq = &rxo->cq;
1632 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1633 sizeof(struct be_eth_rx_compl));
1634 if (rc)
1635 goto err;
1636
1637 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1638 if (rc)
1639 goto err;
1640 /* Rx Q */
1641 q = &rxo->q;
1642 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1643 sizeof(struct be_eth_rx_d));
1644 if (rc)
1645 goto err;
1646
1647 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1648 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1649 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1650 if (rc)
1651 goto err;
1652 }
1653
1654 if (be_multi_rxq(adapter)) {
1655 u8 rsstable[MAX_RSS_QS];
1656
1657 for_all_rss_queues(adapter, rxo, i)
1658 rsstable[i] = rxo->rss_id;
1659
1660 rc = be_cmd_rss_config(adapter, rsstable,
1661 adapter->num_rx_qs - 1);
1662 if (rc)
1663 goto err;
1664 }
1665
1666 return 0;
1667 err:
1668 be_rx_queues_destroy(adapter);
1669 return -1;
1670 }
1671
1672 static bool event_peek(struct be_eq_obj *eq_obj)
1673 {
1674 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1675 if (!eqe->evt)
1676 return false;
1677 else
1678 return true;
1679 }
1680
1681 static irqreturn_t be_intx(int irq, void *dev)
1682 {
1683 struct be_adapter *adapter = dev;
1684 struct be_rx_obj *rxo;
1685 int isr, i, tx = 0 , rx = 0;
1686
1687 if (lancer_chip(adapter)) {
1688 if (event_peek(&adapter->tx_eq))
1689 tx = event_handle(adapter, &adapter->tx_eq);
1690 for_all_rx_queues(adapter, rxo, i) {
1691 if (event_peek(&rxo->rx_eq))
1692 rx |= event_handle(adapter, &rxo->rx_eq);
1693 }
1694
1695 if (!(tx || rx))
1696 return IRQ_NONE;
1697
1698 } else {
1699 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1700 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1701 if (!isr)
1702 return IRQ_NONE;
1703
1704 if ((1 << adapter->tx_eq.eq_idx & isr))
1705 event_handle(adapter, &adapter->tx_eq);
1706
1707 for_all_rx_queues(adapter, rxo, i) {
1708 if ((1 << rxo->rx_eq.eq_idx & isr))
1709 event_handle(adapter, &rxo->rx_eq);
1710 }
1711 }
1712
1713 return IRQ_HANDLED;
1714 }
1715
1716 static irqreturn_t be_msix_rx(int irq, void *dev)
1717 {
1718 struct be_rx_obj *rxo = dev;
1719 struct be_adapter *adapter = rxo->adapter;
1720
1721 event_handle(adapter, &rxo->rx_eq);
1722
1723 return IRQ_HANDLED;
1724 }
1725
1726 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1727 {
1728 struct be_adapter *adapter = dev;
1729
1730 event_handle(adapter, &adapter->tx_eq);
1731
1732 return IRQ_HANDLED;
1733 }
1734
1735 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1736 {
1737 return (rxcp->tcpf && !rxcp->err) ? true : false;
1738 }
1739
1740 static int be_poll_rx(struct napi_struct *napi, int budget)
1741 {
1742 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1743 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1744 struct be_adapter *adapter = rxo->adapter;
1745 struct be_queue_info *rx_cq = &rxo->cq;
1746 struct be_rx_compl_info *rxcp;
1747 u32 work_done;
1748
1749 rxo->stats.rx_polls++;
1750 for (work_done = 0; work_done < budget; work_done++) {
1751 rxcp = be_rx_compl_get(rxo);
1752 if (!rxcp)
1753 break;
1754
1755 /* Ignore flush completions */
1756 if (rxcp->num_rcvd) {
1757 if (do_gro(rxcp))
1758 be_rx_compl_process_gro(adapter, rxo, rxcp);
1759 else
1760 be_rx_compl_process(adapter, rxo, rxcp);
1761 }
1762 be_rx_stats_update(rxo, rxcp);
1763 }
1764
1765 /* Refill the queue */
1766 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1767 be_post_rx_frags(rxo, GFP_ATOMIC);
1768
1769 /* All consumed */
1770 if (work_done < budget) {
1771 napi_complete(napi);
1772 be_cq_notify(adapter, rx_cq->id, true, work_done);
1773 } else {
1774 /* More to be consumed; continue with interrupts disabled */
1775 be_cq_notify(adapter, rx_cq->id, false, work_done);
1776 }
1777 return work_done;
1778 }
1779
1780 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1781 * For TX/MCC we don't honour budget; consume everything
1782 */
1783 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1784 {
1785 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1786 struct be_adapter *adapter =
1787 container_of(tx_eq, struct be_adapter, tx_eq);
1788 struct be_queue_info *txq = &adapter->tx_obj.q;
1789 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1790 struct be_eth_tx_compl *txcp;
1791 int tx_compl = 0, mcc_compl, status = 0;
1792 u16 end_idx;
1793
1794 while ((txcp = be_tx_compl_get(tx_cq))) {
1795 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1796 wrb_index, txcp);
1797 be_tx_compl_process(adapter, end_idx);
1798 tx_compl++;
1799 }
1800
1801 mcc_compl = be_process_mcc(adapter, &status);
1802
1803 napi_complete(napi);
1804
1805 if (mcc_compl) {
1806 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1807 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1808 }
1809
1810 if (tx_compl) {
1811 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1812
1813 /* As Tx wrbs have been freed up, wake up netdev queue if
1814 * it was stopped due to lack of tx wrbs.
1815 */
1816 if (netif_queue_stopped(adapter->netdev) &&
1817 atomic_read(&txq->used) < txq->len / 2) {
1818 netif_wake_queue(adapter->netdev);
1819 }
1820
1821 tx_stats(adapter)->be_tx_events++;
1822 tx_stats(adapter)->be_tx_compl += tx_compl;
1823 }
1824
1825 return 1;
1826 }
1827
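/* Read the UE (unrecoverable error) status registers from PCI config space,
 * apply the corresponding mask registers and log the name of every block
 * that reports an error; ue_detected/eeh_err are latched so the rest of the
 * driver stops touching the hardware.
 */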
1828 void be_detect_dump_ue(struct be_adapter *adapter)
1829 {
1830 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1831 u32 i;
1832
1833 pci_read_config_dword(adapter->pdev,
1834 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1835 pci_read_config_dword(adapter->pdev,
1836 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1837 pci_read_config_dword(adapter->pdev,
1838 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1839 pci_read_config_dword(adapter->pdev,
1840 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1841
1842 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1843 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1844
1845 if (ue_status_lo || ue_status_hi) {
1846 adapter->ue_detected = true;
1847 adapter->eeh_err = true;
1848 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1849 }
1850
1851 if (ue_status_lo) {
1852 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1853 if (ue_status_lo & 1)
1854 dev_err(&adapter->pdev->dev,
1855 "UE: %s bit set\n", ue_status_low_desc[i]);
1856 }
1857 }
1858 if (ue_status_hi) {
1859 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1860 if (ue_status_hi & 1)
1861 dev_err(&adapter->pdev->dev,
1862 "UE: %s bit set\n", ue_status_hi_desc[i]);
1863 }
1864 }
1865
1866 }
1867
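/* Periodic (1 second) housekeeping: check for unrecoverable errors, refresh
 * firmware stats, update tx/rx rates and the adaptive RX EQ delay, and
 * replenish any RX queue that previously ran out of buffers. When the
 * interface is down only MCC completions are reaped.
 */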
1868 static void be_worker(struct work_struct *work)
1869 {
1870 struct be_adapter *adapter =
1871 container_of(work, struct be_adapter, work.work);
1872 struct be_rx_obj *rxo;
1873 int i;
1874
1875 if (!adapter->ue_detected && !lancer_chip(adapter))
1876 be_detect_dump_ue(adapter);
1877
1878 /* when interrupts are not yet enabled, just reap any pending
1879 * mcc completions */
1880 if (!netif_running(adapter->netdev)) {
1881 int mcc_compl, status = 0;
1882
1883 mcc_compl = be_process_mcc(adapter, &status);
1884
1885 if (mcc_compl) {
1886 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1887 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1888 }
1889
1890 goto reschedule;
1891 }
1892
1893 if (!adapter->stats_cmd_sent)
1894 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1895
1896 be_tx_rate_update(adapter);
1897
1898 for_all_rx_queues(adapter, rxo, i) {
1899 be_rx_rate_update(rxo);
1900 be_rx_eqd_update(adapter, rxo);
1901
1902 if (rxo->rx_post_starved) {
1903 rxo->rx_post_starved = false;
1904 be_post_rx_frags(rxo, GFP_KERNEL);
1905 }
1906 }
1907
1908 reschedule:
1909 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1910 }
1911
1912 static void be_msix_disable(struct be_adapter *adapter)
1913 {
1914 if (msix_enabled(adapter)) {
1915 pci_disable_msix(adapter->pdev);
1916 adapter->num_msix_vec = 0;
1917 }
1918 }
1919
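/* Try to enable one MSI-X vector per desired RX queue plus one shared TX/MCC
 * vector; if the full request fails but the OS offers at least
 * BE_MIN_MSIX_VECTORS, retry with the reduced count. num_msix_vec stays 0 on
 * failure, so the driver later falls back to INTx.
 */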
1920 static void be_msix_enable(struct be_adapter *adapter)
1921 {
1922 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
1923 int i, status, num_vec;
1924
1925 num_vec = be_num_rxqs_want(adapter) + 1;
1926
1927 for (i = 0; i < num_vec; i++)
1928 adapter->msix_entries[i].entry = i;
1929
1930 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
1931 if (status == 0) {
1932 goto done;
1933 } else if (status >= BE_MIN_MSIX_VECTORS) {
1934 num_vec = status;
1935 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1936 num_vec) == 0)
1937 goto done;
1938 }
1939 return;
1940 done:
1941 adapter->num_msix_vec = num_vec;
1942 return;
1943 }
1944
1945 static void be_sriov_enable(struct be_adapter *adapter)
1946 {
1947 be_check_sriov_fn_type(adapter);
1948 #ifdef CONFIG_PCI_IOV
1949 if (be_physfn(adapter) && num_vfs) {
1950 int status, pos;
1951 u16 nvfs;
1952
1953 pos = pci_find_ext_capability(adapter->pdev,
1954 PCI_EXT_CAP_ID_SRIOV);
1955 pci_read_config_word(adapter->pdev,
1956 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
1957
1958 if (num_vfs > nvfs) {
1959 dev_info(&adapter->pdev->dev,
1960 "Device supports %d VFs and not %d\n",
1961 nvfs, num_vfs);
1962 num_vfs = nvfs;
1963 }
1964
1965 status = pci_enable_sriov(adapter->pdev, num_vfs);
1966 adapter->sriov_enabled = status ? false : true;
1967 }
1968 #endif
1969 }
1970
1971 static void be_sriov_disable(struct be_adapter *adapter)
1972 {
1973 #ifdef CONFIG_PCI_IOV
1974 if (adapter->sriov_enabled) {
1975 pci_disable_sriov(adapter->pdev);
1976 adapter->sriov_enabled = false;
1977 }
1978 #endif
1979 }
1980
1981 static inline int be_msix_vec_get(struct be_adapter *adapter,
1982 struct be_eq_obj *eq_obj)
1983 {
1984 return adapter->msix_entries[eq_obj->eq_idx].vector;
1985 }
1986
1987 static int be_request_irq(struct be_adapter *adapter,
1988 struct be_eq_obj *eq_obj,
1989 void *handler, char *desc, void *context)
1990 {
1991 struct net_device *netdev = adapter->netdev;
1992 int vec;
1993
1994 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1995 vec = be_msix_vec_get(adapter, eq_obj);
1996 return request_irq(vec, handler, 0, eq_obj->desc, context);
1997 }
1998
1999 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2000 void *context)
2001 {
2002 int vec = be_msix_vec_get(adapter, eq_obj);
2003 free_irq(vec, context);
2004 }
2005
2006 static int be_msix_register(struct be_adapter *adapter)
2007 {
2008 struct be_rx_obj *rxo;
2009 int status, i;
2010 char qname[10];
2011
2012 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2013 adapter);
2014 if (status)
2015 goto err;
2016
2017 for_all_rx_queues(adapter, rxo, i) {
2018 sprintf(qname, "rxq%d", i);
2019 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2020 qname, rxo);
2021 if (status)
2022 goto err_msix;
2023 }
2024
2025 return 0;
2026
2027 err_msix:
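	/* Unwind: free the Tx EQ vector, then the Rx EQ vectors registered
	 * so far, in reverse order.
	 */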
2028 be_free_irq(adapter, &adapter->tx_eq, adapter);
2029
2030 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2031 be_free_irq(adapter, &rxo->rx_eq, rxo);
2032
2033 err:
2034 dev_warn(&adapter->pdev->dev,
2035 "MSIX Request IRQ failed - err %d\n", status);
2036 be_msix_disable(adapter);
2037 return status;
2038 }
2039
2040 static int be_irq_register(struct be_adapter *adapter)
2041 {
2042 struct net_device *netdev = adapter->netdev;
2043 int status;
2044
2045 if (msix_enabled(adapter)) {
2046 status = be_msix_register(adapter);
2047 if (status == 0)
2048 goto done;
2049 /* INTx is not supported for VF */
2050 if (!be_physfn(adapter))
2051 return status;
2052 }
2053
2054 /* INTx */
2055 netdev->irq = adapter->pdev->irq;
2056 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2057 adapter);
2058 if (status) {
2059 dev_err(&adapter->pdev->dev,
2060 "INTx request IRQ failed - err %d\n", status);
2061 return status;
2062 }
2063 done:
2064 adapter->isr_registered = true;
2065 return 0;
2066 }
2067
2068 static void be_irq_unregister(struct be_adapter *adapter)
2069 {
2070 struct net_device *netdev = adapter->netdev;
2071 struct be_rx_obj *rxo;
2072 int i;
2073
2074 if (!adapter->isr_registered)
2075 return;
2076
2077 /* INTx */
2078 if (!msix_enabled(adapter)) {
2079 free_irq(netdev->irq, adapter);
2080 goto done;
2081 }
2082
2083 /* MSIx */
2084 be_free_irq(adapter, &adapter->tx_eq, adapter);
2085
2086 for_all_rx_queues(adapter, rxo, i)
2087 be_free_irq(adapter, &rxo->rx_eq, rxo);
2088
2089 done:
2090 adapter->isr_registered = false;
2091 }
2092
2093 static int be_close(struct net_device *netdev)
2094 {
2095 struct be_adapter *adapter = netdev_priv(netdev);
2096 struct be_rx_obj *rxo;
2097 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2098 int vec, i;
2099
2100 be_async_mcc_disable(adapter);
2101
2102 netif_carrier_off(netdev);
2103 adapter->link_up = false;
2104
2105 if (!lancer_chip(adapter))
2106 be_intr_set(adapter, false);
2107
2108 for_all_rx_queues(adapter, rxo, i)
2109 napi_disable(&rxo->rx_eq.napi);
2110
2111 napi_disable(&tx_eq->napi);
2112
2113 if (lancer_chip(adapter)) {
2114 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2115 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2116 for_all_rx_queues(adapter, rxo, i)
2117 be_cq_notify(adapter, rxo->cq.id, false, 0);
2118 }
2119
2120 if (msix_enabled(adapter)) {
2121 vec = be_msix_vec_get(adapter, tx_eq);
2122 synchronize_irq(vec);
2123
2124 for_all_rx_queues(adapter, rxo, i) {
2125 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2126 synchronize_irq(vec);
2127 }
2128 } else {
2129 synchronize_irq(netdev->irq);
2130 }
2131 be_irq_unregister(adapter);
2132
2133 /* Wait for all pending tx completions to arrive so that
2134 * all tx skbs are freed.
2135 */
2136 be_tx_compl_clean(adapter);
2137
2138 return 0;
2139 }
2140
2141 static int be_open(struct net_device *netdev)
2142 {
2143 struct be_adapter *adapter = netdev_priv(netdev);
2144 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2145 struct be_rx_obj *rxo;
2146 bool link_up;
2147 int status, i;
2148 u8 mac_speed;
2149 u16 link_speed;
2150
2151 for_all_rx_queues(adapter, rxo, i) {
2152 be_post_rx_frags(rxo, GFP_KERNEL);
2153 napi_enable(&rxo->rx_eq.napi);
2154 }
2155 napi_enable(&tx_eq->napi);
2156
2157 be_irq_register(adapter);
2158
2159 if (!lancer_chip(adapter))
2160 be_intr_set(adapter, true);
2161
2162 /* The evt queues are created in unarmed state; arm them */
2163 for_all_rx_queues(adapter, rxo, i) {
2164 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2165 be_cq_notify(adapter, rxo->cq.id, true, 0);
2166 }
2167 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2168
2169 /* Now that interrupts are on we can process async mcc */
2170 be_async_mcc_enable(adapter);
2171
2172 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2173 &link_speed);
2174 if (status)
2175 goto err;
2176 be_link_status_update(adapter, link_up);
2177
2178 if (be_physfn(adapter)) {
2179 status = be_vid_config(adapter, false, 0);
2180 if (status)
2181 goto err;
2182
2183 status = be_cmd_set_flow_control(adapter,
2184 adapter->tx_fc, adapter->rx_fc);
2185 if (status)
2186 goto err;
2187 }
2188
2189 return 0;
2190 err:
2191 be_close(adapter->netdev);
2192 return -EIO;
2193 }
2194
2195 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2196 {
2197 struct be_dma_mem cmd;
2198 int status = 0;
2199 u8 mac[ETH_ALEN];
2200
2201 memset(mac, 0, ETH_ALEN);
2202
2203 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2204 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2205 GFP_KERNEL);
2206 if (cmd.va == NULL)
2207 return -1;
2208 memset(cmd.va, 0, cmd.size);
2209
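	/* Enabling WoL programs the interface MAC for magic-packet wake and
	 * arms PME for D3hot/D3cold; disabling programs a zeroed MAC and
	 * disarms PME.
	 */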
2210 if (enable) {
2211 status = pci_write_config_dword(adapter->pdev,
2212 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2213 if (status) {
2214 dev_err(&adapter->pdev->dev,
2215 				"Could not enable Wake-on-LAN\n");
2216 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2217 cmd.dma);
2218 return status;
2219 }
2220 status = be_cmd_enable_magic_wol(adapter,
2221 adapter->netdev->dev_addr, &cmd);
2222 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2223 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2224 } else {
2225 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2226 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2227 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2228 }
2229
2230 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2231 return status;
2232 }
2233
2234 /*
2235 * Generate a seed MAC address from the PF MAC Address using jhash.
2236  * MAC addresses for VFs are assigned incrementally starting from the seed.
2237 * These addresses are programmed in the ASIC by the PF and the VF driver
2238 * queries for the MAC address during its probe.
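 * For example, if the generated seed ends in ...:50, VF0 is given ...:50,
 * VF1 ...:51 and so on, since only the last octet is incremented per VF.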
2239 */
2240 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2241 {
2242 u32 vf = 0;
2243 int status = 0;
2244 u8 mac[ETH_ALEN];
2245
2246 be_vf_eth_addr_generate(adapter, mac);
2247
2248 for (vf = 0; vf < num_vfs; vf++) {
2249 status = be_cmd_pmac_add(adapter, mac,
2250 adapter->vf_cfg[vf].vf_if_handle,
2251 &adapter->vf_cfg[vf].vf_pmac_id,
2252 vf + 1);
2253 if (status)
2254 dev_err(&adapter->pdev->dev,
2255 "Mac address add failed for VF %d\n", vf);
2256 else
2257 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2258
2259 mac[5] += 1;
2260 }
2261 return status;
2262 }
2263
2264 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2265 {
2266 u32 vf;
2267
2268 for (vf = 0; vf < num_vfs; vf++) {
2269 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2270 be_cmd_pmac_del(adapter,
2271 adapter->vf_cfg[vf].vf_if_handle,
2272 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2273 }
2274 }
2275
2276 static int be_setup(struct be_adapter *adapter)
2277 {
2278 struct net_device *netdev = adapter->netdev;
2279 u32 cap_flags, en_flags, vf = 0;
2280 int status;
2281 u8 mac[ETH_ALEN];
2282
2283 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2284 BE_IF_FLAGS_BROADCAST |
2285 BE_IF_FLAGS_MULTICAST;
2286
2287 if (be_physfn(adapter)) {
2288 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2289 BE_IF_FLAGS_PROMISCUOUS |
2290 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2291 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2292
2293 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2294 cap_flags |= BE_IF_FLAGS_RSS;
2295 en_flags |= BE_IF_FLAGS_RSS;
2296 }
2297 }
2298
2299 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2300 netdev->dev_addr, false/* pmac_invalid */,
2301 &adapter->if_handle, &adapter->pmac_id, 0);
2302 if (status != 0)
2303 goto do_none;
2304
2305 if (be_physfn(adapter)) {
2306 if (adapter->sriov_enabled) {
2307 while (vf < num_vfs) {
2308 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2309 BE_IF_FLAGS_BROADCAST;
2310 status = be_cmd_if_create(adapter, cap_flags,
2311 en_flags, mac, true,
2312 &adapter->vf_cfg[vf].vf_if_handle,
2313 NULL, vf+1);
2314 if (status) {
2315 dev_err(&adapter->pdev->dev,
2316 "Interface Create failed for VF %d\n",
2317 vf);
2318 goto if_destroy;
2319 }
2320 adapter->vf_cfg[vf].vf_pmac_id =
2321 BE_INVALID_PMAC_ID;
2322 vf++;
2323 }
2324 }
2325 } else {
2326 status = be_cmd_mac_addr_query(adapter, mac,
2327 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2328 if (!status) {
2329 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2330 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2331 }
2332 }
2333
2334 status = be_tx_queues_create(adapter);
2335 if (status != 0)
2336 goto if_destroy;
2337
2338 status = be_rx_queues_create(adapter);
2339 if (status != 0)
2340 goto tx_qs_destroy;
2341
2342 status = be_mcc_queues_create(adapter);
2343 if (status != 0)
2344 goto rx_qs_destroy;
2345
2346 adapter->link_speed = -1;
2347
2348 return 0;
2349
2350 rx_qs_destroy:
2351 be_rx_queues_destroy(adapter);
2352 tx_qs_destroy:
2353 be_tx_queues_destroy(adapter);
2354 if_destroy:
2355 if (be_physfn(adapter) && adapter->sriov_enabled)
2356 for (vf = 0; vf < num_vfs; vf++)
2357 if (adapter->vf_cfg[vf].vf_if_handle)
2358 be_cmd_if_destroy(adapter,
2359 adapter->vf_cfg[vf].vf_if_handle,
2360 vf + 1);
2361 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2362 do_none:
2363 return status;
2364 }
2365
2366 static int be_clear(struct be_adapter *adapter)
2367 {
2368 int vf;
2369
2370 if (be_physfn(adapter) && adapter->sriov_enabled)
2371 be_vf_eth_addr_rem(adapter);
2372
2373 be_mcc_queues_destroy(adapter);
2374 be_rx_queues_destroy(adapter);
2375 be_tx_queues_destroy(adapter);
2376 adapter->eq_next_idx = 0;
2377
2378 if (be_physfn(adapter) && adapter->sriov_enabled)
2379 for (vf = 0; vf < num_vfs; vf++)
2380 if (adapter->vf_cfg[vf].vf_if_handle)
2381 be_cmd_if_destroy(adapter,
2382 adapter->vf_cfg[vf].vf_if_handle,
2383 vf + 1);
2384
2385 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2386
2387 /* tell fw we're done with firing cmds */
2388 be_cmd_fw_clean(adapter);
2389 return 0;
2390 }
2391
2392
2393 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2394 static bool be_flash_redboot(struct be_adapter *adapter,
2395 const u8 *p, u32 img_start, int image_size,
2396 int hdr_size)
2397 {
2398 u32 crc_offset;
2399 u8 flashed_crc[4];
2400 int status;
2401
2402 crc_offset = hdr_size + img_start + image_size - 4;
2403
2404 p += crc_offset;
2405
2406 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2407 (image_size - 4));
2408 if (status) {
2409 dev_err(&adapter->pdev->dev,
2410 "could not get crc from flash, not flashing redboot\n");
2411 return false;
2412 }
2413
2414 	/* update redboot only if crc does not match */
2415 if (!memcmp(flashed_crc, p, 4))
2416 return false;
2417 else
2418 return true;
2419 }
2420
2421 static int be_flash_data(struct be_adapter *adapter,
2422 const struct firmware *fw,
2423 struct be_dma_mem *flash_cmd, int num_of_images)
2424
2425 {
2426 int status = 0, i, filehdr_size = 0;
2427 u32 total_bytes = 0, flash_op;
2428 int num_bytes;
2429 const u8 *p = fw->data;
2430 struct be_cmd_write_flashrom *req = flash_cmd->va;
2431 const struct flash_comp *pflashcomp;
2432 int num_comp;
2433
2434 static const struct flash_comp gen3_flash_types[9] = {
2435 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2436 FLASH_IMAGE_MAX_SIZE_g3},
2437 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2438 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2439 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2440 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2441 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2442 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2443 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2444 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2445 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2446 FLASH_IMAGE_MAX_SIZE_g3},
2447 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2448 FLASH_IMAGE_MAX_SIZE_g3},
2449 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2450 FLASH_IMAGE_MAX_SIZE_g3},
2451 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2452 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2453 };
2454 static const struct flash_comp gen2_flash_types[8] = {
2455 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2456 FLASH_IMAGE_MAX_SIZE_g2},
2457 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2458 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2459 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2460 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2461 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2462 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2463 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2464 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2465 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2466 FLASH_IMAGE_MAX_SIZE_g2},
2467 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2468 FLASH_IMAGE_MAX_SIZE_g2},
2469 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2470 FLASH_IMAGE_MAX_SIZE_g2}
2471 };
2472
2473 if (adapter->generation == BE_GEN3) {
2474 pflashcomp = gen3_flash_types;
2475 filehdr_size = sizeof(struct flash_file_hdr_g3);
2476 num_comp = ARRAY_SIZE(gen3_flash_types);
2477 } else {
2478 pflashcomp = gen2_flash_types;
2479 filehdr_size = sizeof(struct flash_file_hdr_g2);
2480 num_comp = ARRAY_SIZE(gen2_flash_types);
2481 }
2482 for (i = 0; i < num_comp; i++) {
2483 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2484 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2485 continue;
2486 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2487 (!be_flash_redboot(adapter, fw->data,
2488 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2489 (num_of_images * sizeof(struct image_hdr)))))
2490 continue;
2491 p = fw->data;
2492 p += filehdr_size + pflashcomp[i].offset
2493 + (num_of_images * sizeof(struct image_hdr));
2494 if (p + pflashcomp[i].size > fw->data + fw->size)
2495 return -1;
2496 total_bytes = pflashcomp[i].size;
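		/* Write the component in 32KB chunks: intermediate chunks use
		 * the SAVE op to stage data, the final chunk uses the FLASH op
		 * to commit it.
		 */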
2497 while (total_bytes) {
2498 if (total_bytes > 32*1024)
2499 num_bytes = 32*1024;
2500 else
2501 num_bytes = total_bytes;
2502 total_bytes -= num_bytes;
2503
2504 if (!total_bytes)
2505 flash_op = FLASHROM_OPER_FLASH;
2506 else
2507 flash_op = FLASHROM_OPER_SAVE;
2508 memcpy(req->params.data_buf, p, num_bytes);
2509 p += num_bytes;
2510 status = be_cmd_write_flashrom(adapter, flash_cmd,
2511 pflashcomp[i].optype, flash_op, num_bytes);
2512 if (status) {
2513 dev_err(&adapter->pdev->dev,
2514 "cmd to write to flash rom failed.\n");
2515 return -1;
2516 }
2517 yield();
2518 }
2519 }
2520 return 0;
2521 }
2522
2523 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2524 {
2525 if (fhdr == NULL)
2526 return 0;
2527 if (fhdr->build[0] == '3')
2528 return BE_GEN3;
2529 else if (fhdr->build[0] == '2')
2530 return BE_GEN2;
2531 else
2532 return 0;
2533 }
2534
2535 int be_load_fw(struct be_adapter *adapter, u8 *func)
2536 {
2537 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2538 const struct firmware *fw;
2539 struct flash_file_hdr_g2 *fhdr;
2540 struct flash_file_hdr_g3 *fhdr3;
2541 struct image_hdr *img_hdr_ptr = NULL;
2542 struct be_dma_mem flash_cmd;
2543 int status, i = 0, num_imgs = 0;
2544 const u8 *p;
2545
2546 if (!netif_running(adapter->netdev)) {
2547 dev_err(&adapter->pdev->dev,
2548 "Firmware load not allowed (interface is down)\n");
2549 return -EPERM;
2550 }
2551
2552 strcpy(fw_file, func);
2553
2554 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2555 if (status)
2556 goto fw_exit;
2557
2558 p = fw->data;
2559 fhdr = (struct flash_file_hdr_g2 *) p;
2560 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2561
2562 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2563 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2564 &flash_cmd.dma, GFP_KERNEL);
2565 if (!flash_cmd.va) {
2566 status = -ENOMEM;
2567 dev_err(&adapter->pdev->dev,
2568 "Memory allocation failure while flashing\n");
2569 goto fw_exit;
2570 }
2571
2572 if ((adapter->generation == BE_GEN3) &&
2573 (get_ufigen_type(fhdr) == BE_GEN3)) {
2574 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2575 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2576 for (i = 0; i < num_imgs; i++) {
2577 img_hdr_ptr = (struct image_hdr *) (fw->data +
2578 (sizeof(struct flash_file_hdr_g3) +
2579 i * sizeof(struct image_hdr)));
2580 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2581 status = be_flash_data(adapter, fw, &flash_cmd,
2582 num_imgs);
2583 }
2584 } else if ((adapter->generation == BE_GEN2) &&
2585 (get_ufigen_type(fhdr) == BE_GEN2)) {
2586 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2587 } else {
2588 dev_err(&adapter->pdev->dev,
2589 "UFI and Interface are not compatible for flashing\n");
2590 status = -1;
2591 }
2592
2593 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2594 flash_cmd.dma);
2595 if (status) {
2596 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2597 goto fw_exit;
2598 }
2599
2600 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2601
2602 fw_exit:
2603 release_firmware(fw);
2604 return status;
2605 }
2606
2607 static struct net_device_ops be_netdev_ops = {
2608 .ndo_open = be_open,
2609 .ndo_stop = be_close,
2610 .ndo_start_xmit = be_xmit,
2611 .ndo_set_rx_mode = be_set_multicast_list,
2612 .ndo_set_mac_address = be_mac_addr_set,
2613 .ndo_change_mtu = be_change_mtu,
2614 .ndo_validate_addr = eth_validate_addr,
2615 .ndo_vlan_rx_register = be_vlan_register,
2616 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2617 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2618 .ndo_set_vf_mac = be_set_vf_mac,
2619 .ndo_set_vf_vlan = be_set_vf_vlan,
2620 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2621 .ndo_get_vf_config = be_get_vf_config
2622 };
2623
2624 static void be_netdev_init(struct net_device *netdev)
2625 {
2626 struct be_adapter *adapter = netdev_priv(netdev);
2627 struct be_rx_obj *rxo;
2628 int i;
2629
2630 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2631 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2632 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2633 NETIF_F_GRO | NETIF_F_TSO6;
2634
2635 if (be_multi_rxq(adapter))
2636 netdev->features |= NETIF_F_RXHASH;
2637
2638 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2639 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2640
2641 if (lancer_chip(adapter))
2642 netdev->vlan_features |= NETIF_F_TSO6;
2643
2644 netdev->flags |= IFF_MULTICAST;
2645
2646 adapter->rx_csum = true;
2647
2648 /* Default settings for Rx and Tx flow control */
2649 adapter->rx_fc = true;
2650 adapter->tx_fc = true;
2651
2652 netif_set_gso_max_size(netdev, 65535);
2653
2654 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2655
2656 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2657
2658 for_all_rx_queues(adapter, rxo, i)
2659 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2660 BE_NAPI_WEIGHT);
2661
2662 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2663 BE_NAPI_WEIGHT);
2664 }
2665
2666 static void be_unmap_pci_bars(struct be_adapter *adapter)
2667 {
2668 if (adapter->csr)
2669 iounmap(adapter->csr);
2670 if (adapter->db)
2671 iounmap(adapter->db);
2672 if (adapter->pcicfg && be_physfn(adapter))
2673 iounmap(adapter->pcicfg);
2674 }
2675
2676 static int be_map_pci_bars(struct be_adapter *adapter)
2677 {
2678 u8 __iomem *addr;
2679 int pcicfg_reg, db_reg;
2680
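	/* BAR layout differs by family: Lancer exposes doorbells via BAR 0;
	 * BE2 uses BAR 2 (CSR), BAR 4 (doorbells) and BAR 1 (pcicfg); BE3
	 * PFs use BAR 0 for pcicfg and BAR 4 for doorbells, while BE3 VFs
	 * reach pcicfg at an offset within their doorbell BAR 0.
	 */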
2681 if (lancer_chip(adapter)) {
2682 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2683 pci_resource_len(adapter->pdev, 0));
2684 if (addr == NULL)
2685 return -ENOMEM;
2686 adapter->db = addr;
2687 return 0;
2688 }
2689
2690 if (be_physfn(adapter)) {
2691 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2692 pci_resource_len(adapter->pdev, 2));
2693 if (addr == NULL)
2694 return -ENOMEM;
2695 adapter->csr = addr;
2696 }
2697
2698 if (adapter->generation == BE_GEN2) {
2699 pcicfg_reg = 1;
2700 db_reg = 4;
2701 } else {
2702 pcicfg_reg = 0;
2703 if (be_physfn(adapter))
2704 db_reg = 4;
2705 else
2706 db_reg = 0;
2707 }
2708 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2709 pci_resource_len(adapter->pdev, db_reg));
2710 if (addr == NULL)
2711 goto pci_map_err;
2712 adapter->db = addr;
2713
2714 if (be_physfn(adapter)) {
2715 addr = ioremap_nocache(
2716 pci_resource_start(adapter->pdev, pcicfg_reg),
2717 pci_resource_len(adapter->pdev, pcicfg_reg));
2718 if (addr == NULL)
2719 goto pci_map_err;
2720 adapter->pcicfg = addr;
2721 } else
2722 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2723
2724 return 0;
2725 pci_map_err:
2726 be_unmap_pci_bars(adapter);
2727 return -ENOMEM;
2728 }
2729
2730
2731 static void be_ctrl_cleanup(struct be_adapter *adapter)
2732 {
2733 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2734
2735 be_unmap_pci_bars(adapter);
2736
2737 if (mem->va)
2738 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2739 mem->dma);
2740
2741 mem = &adapter->mc_cmd_mem;
2742 if (mem->va)
2743 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2744 mem->dma);
2745 }
2746
2747 static int be_ctrl_init(struct be_adapter *adapter)
2748 {
2749 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2750 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2751 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2752 int status;
2753
2754 status = be_map_pci_bars(adapter);
2755 if (status)
2756 goto done;
2757
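	/* The mailbox must be 16-byte aligned: over-allocate by 16 bytes and
	 * align both the virtual and DMA addresses.
	 */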
2758 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2759 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2760 mbox_mem_alloc->size,
2761 &mbox_mem_alloc->dma,
2762 GFP_KERNEL);
2763 if (!mbox_mem_alloc->va) {
2764 status = -ENOMEM;
2765 goto unmap_pci_bars;
2766 }
2767
2768 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2769 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2770 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2771 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2772
2773 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2774 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2775 mc_cmd_mem->size, &mc_cmd_mem->dma,
2776 GFP_KERNEL);
2777 if (mc_cmd_mem->va == NULL) {
2778 status = -ENOMEM;
2779 goto free_mbox;
2780 }
2781 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2782
2783 mutex_init(&adapter->mbox_lock);
2784 spin_lock_init(&adapter->mcc_lock);
2785 spin_lock_init(&adapter->mcc_cq_lock);
2786
2787 init_completion(&adapter->flash_compl);
2788 pci_save_state(adapter->pdev);
2789 return 0;
2790
2791 free_mbox:
2792 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2793 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2794
2795 unmap_pci_bars:
2796 be_unmap_pci_bars(adapter);
2797
2798 done:
2799 return status;
2800 }
2801
2802 static void be_stats_cleanup(struct be_adapter *adapter)
2803 {
2804 struct be_dma_mem *cmd = &adapter->stats_cmd;
2805
2806 if (cmd->va)
2807 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2808 cmd->va, cmd->dma);
2809 }
2810
2811 static int be_stats_init(struct be_adapter *adapter)
2812 {
2813 struct be_dma_mem *cmd = &adapter->stats_cmd;
2814
2815 cmd->size = sizeof(struct be_cmd_req_get_stats);
2816 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2817 GFP_KERNEL);
2818 if (cmd->va == NULL)
2819 return -1;
2820 memset(cmd->va, 0, cmd->size);
2821 return 0;
2822 }
2823
2824 static void __devexit be_remove(struct pci_dev *pdev)
2825 {
2826 struct be_adapter *adapter = pci_get_drvdata(pdev);
2827
2828 if (!adapter)
2829 return;
2830
2831 cancel_delayed_work_sync(&adapter->work);
2832
2833 unregister_netdev(adapter->netdev);
2834
2835 be_clear(adapter);
2836
2837 be_stats_cleanup(adapter);
2838
2839 be_ctrl_cleanup(adapter);
2840
2841 kfree(adapter->vf_cfg);
2842 be_sriov_disable(adapter);
2843
2844 be_msix_disable(adapter);
2845
2846 pci_set_drvdata(pdev, NULL);
2847 pci_release_regions(pdev);
2848 pci_disable_device(pdev);
2849
2850 free_netdev(adapter->netdev);
2851 }
2852
2853 static int be_get_config(struct be_adapter *adapter)
2854 {
2855 int status;
2856 u8 mac[ETH_ALEN];
2857
2858 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2859 if (status)
2860 return status;
2861
2862 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2863 &adapter->function_mode, &adapter->function_caps);
2864 if (status)
2865 return status;
2866
2867 memset(mac, 0, ETH_ALEN);
2868
2869 if (be_physfn(adapter)) {
2870 status = be_cmd_mac_addr_query(adapter, mac,
2871 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2872
2873 if (status)
2874 return status;
2875
2876 if (!is_valid_ether_addr(mac))
2877 return -EADDRNOTAVAIL;
2878
2879 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2880 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2881 }
2882
2883 if (adapter->function_mode & 0x400)
2884 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2885 else
2886 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2887
2888 status = be_cmd_get_cntl_attributes(adapter);
2889 if (status)
2890 return status;
2891
2892 be_cmd_check_native_mode(adapter);
2893 return 0;
2894 }
2895
2896 static int be_dev_family_check(struct be_adapter *adapter)
2897 {
2898 struct pci_dev *pdev = adapter->pdev;
2899 u32 sli_intf = 0, if_type;
2900
2901 switch (pdev->device) {
2902 case BE_DEVICE_ID1:
2903 case OC_DEVICE_ID1:
2904 adapter->generation = BE_GEN2;
2905 break;
2906 case BE_DEVICE_ID2:
2907 case OC_DEVICE_ID2:
2908 adapter->generation = BE_GEN3;
2909 break;
2910 case OC_DEVICE_ID3:
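		/* Lancer: validate the SLI interface register, record the SLI
		 * family, and reject VF configuration (not supported here).
		 */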
2911 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2912 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2913 SLI_INTF_IF_TYPE_SHIFT;
2914
2915 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2916 if_type != 0x02) {
2917 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2918 return -EINVAL;
2919 }
2920 if (num_vfs > 0) {
2921 dev_err(&pdev->dev, "VFs not supported\n");
2922 return -EINVAL;
2923 }
2924 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2925 SLI_INTF_FAMILY_SHIFT);
2926 adapter->generation = BE_GEN3;
2927 break;
2928 default:
2929 adapter->generation = 0;
2930 }
2931 return 0;
2932 }
2933
2934 static int lancer_wait_ready(struct be_adapter *adapter)
2935 {
2936 #define SLIPORT_READY_TIMEOUT 500
2937 u32 sliport_status;
2938 int status = 0, i;
2939
2940 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2941 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2942 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2943 break;
2944
2945 msleep(20);
2946 }
2947
2948 if (i == SLIPORT_READY_TIMEOUT)
2949 status = -1;
2950
2951 return status;
2952 }
2953
2954 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2955 {
2956 int status;
2957 u32 sliport_status, err, reset_needed;
2958 status = lancer_wait_ready(adapter);
2959 if (!status) {
2960 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2961 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2962 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2963 if (err && reset_needed) {
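			/* Ask the adapter to re-initialize the port via the
			 * SLIPORT control register, then wait for it to become
			 * ready and error-free again.
			 */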
2964 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2965 adapter->db + SLIPORT_CONTROL_OFFSET);
2966
2967 /* check adapter has corrected the error */
2968 status = lancer_wait_ready(adapter);
2969 sliport_status = ioread32(adapter->db +
2970 SLIPORT_STATUS_OFFSET);
2971 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2972 SLIPORT_STATUS_RN_MASK);
2973 if (status || sliport_status)
2974 status = -1;
2975 } else if (err || reset_needed) {
2976 status = -1;
2977 }
2978 }
2979 return status;
2980 }
2981
2982 static int __devinit be_probe(struct pci_dev *pdev,
2983 const struct pci_device_id *pdev_id)
2984 {
2985 int status = 0;
2986 struct be_adapter *adapter;
2987 struct net_device *netdev;
2988
2989 status = pci_enable_device(pdev);
2990 if (status)
2991 goto do_none;
2992
2993 status = pci_request_regions(pdev, DRV_NAME);
2994 if (status)
2995 goto disable_dev;
2996 pci_set_master(pdev);
2997
2998 netdev = alloc_etherdev(sizeof(struct be_adapter));
2999 if (netdev == NULL) {
3000 status = -ENOMEM;
3001 goto rel_reg;
3002 }
3003 adapter = netdev_priv(netdev);
3004 adapter->pdev = pdev;
3005 pci_set_drvdata(pdev, adapter);
3006
3007 status = be_dev_family_check(adapter);
3008 if (status)
3009 goto free_netdev;
3010
3011 adapter->netdev = netdev;
3012 SET_NETDEV_DEV(netdev, &pdev->dev);
3013
3014 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3015 if (!status) {
3016 netdev->features |= NETIF_F_HIGHDMA;
3017 } else {
3018 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3019 if (status) {
3020 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3021 goto free_netdev;
3022 }
3023 }
3024
3025 be_sriov_enable(adapter);
3026 if (adapter->sriov_enabled) {
3027 adapter->vf_cfg = kcalloc(num_vfs,
3028 sizeof(struct be_vf_cfg), GFP_KERNEL);
3029
3030 		if (!adapter->vf_cfg) {
3031 			status = -ENOMEM;
			goto free_netdev;
		}
3032 }
3033
3034 status = be_ctrl_init(adapter);
3035 if (status)
3036 goto free_vf_cfg;
3037
3038 if (lancer_chip(adapter)) {
3039 status = lancer_test_and_set_rdy_state(adapter);
3040 if (status) {
3041 			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3042 goto ctrl_clean;
3043 }
3044 }
3045
3046 /* sync up with fw's ready state */
3047 if (be_physfn(adapter)) {
3048 status = be_cmd_POST(adapter);
3049 if (status)
3050 goto ctrl_clean;
3051 }
3052
3053 /* tell fw we're ready to fire cmds */
3054 status = be_cmd_fw_init(adapter);
3055 if (status)
3056 goto ctrl_clean;
3057
3058 status = be_cmd_reset_function(adapter);
3059 if (status)
3060 goto ctrl_clean;
3061
3062 status = be_stats_init(adapter);
3063 if (status)
3064 goto ctrl_clean;
3065
3066 status = be_get_config(adapter);
3067 if (status)
3068 goto stats_clean;
3069
3070 be_msix_enable(adapter);
3071
3072 INIT_DELAYED_WORK(&adapter->work, be_worker);
3073
3074 status = be_setup(adapter);
3075 if (status)
3076 goto msix_disable;
3077
3078 be_netdev_init(netdev);
3079 status = register_netdev(netdev);
3080 if (status != 0)
3081 goto unsetup;
3082 netif_carrier_off(netdev);
3083
3084 if (be_physfn(adapter) && adapter->sriov_enabled) {
3085 status = be_vf_eth_addr_config(adapter);
3086 if (status)
3087 goto unreg_netdev;
3088 }
3089
3090 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3091 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3092 return 0;
3093
3094 unreg_netdev:
3095 unregister_netdev(netdev);
3096 unsetup:
3097 be_clear(adapter);
3098 msix_disable:
3099 be_msix_disable(adapter);
3100 stats_clean:
3101 be_stats_cleanup(adapter);
3102 ctrl_clean:
3103 be_ctrl_cleanup(adapter);
3104 free_vf_cfg:
3105 kfree(adapter->vf_cfg);
3106 free_netdev:
3107 be_sriov_disable(adapter);
3108 free_netdev(netdev);
3109 pci_set_drvdata(pdev, NULL);
3110 rel_reg:
3111 pci_release_regions(pdev);
3112 disable_dev:
3113 pci_disable_device(pdev);
3114 do_none:
3115 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3116 return status;
3117 }
3118
3119 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3120 {
3121 struct be_adapter *adapter = pci_get_drvdata(pdev);
3122 struct net_device *netdev = adapter->netdev;
3123
3124 cancel_delayed_work_sync(&adapter->work);
3125 if (adapter->wol)
3126 be_setup_wol(adapter, true);
3127
3128 netif_device_detach(netdev);
3129 if (netif_running(netdev)) {
3130 rtnl_lock();
3131 be_close(netdev);
3132 rtnl_unlock();
3133 }
3134 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3135 be_clear(adapter);
3136
3137 be_msix_disable(adapter);
3138 pci_save_state(pdev);
3139 pci_disable_device(pdev);
3140 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3141 return 0;
3142 }
3143
3144 static int be_resume(struct pci_dev *pdev)
3145 {
3146 int status = 0;
3147 struct be_adapter *adapter = pci_get_drvdata(pdev);
3148 struct net_device *netdev = adapter->netdev;
3149
3150 netif_device_detach(netdev);
3151
3152 status = pci_enable_device(pdev);
3153 if (status)
3154 return status;
3155
3156 pci_set_power_state(pdev, 0);
3157 pci_restore_state(pdev);
3158
3159 be_msix_enable(adapter);
3160 /* tell fw we're ready to fire cmds */
3161 status = be_cmd_fw_init(adapter);
3162 if (status)
3163 return status;
3164
3165 be_setup(adapter);
3166 if (netif_running(netdev)) {
3167 rtnl_lock();
3168 be_open(netdev);
3169 rtnl_unlock();
3170 }
3171 netif_device_attach(netdev);
3172
3173 if (adapter->wol)
3174 be_setup_wol(adapter, false);
3175
3176 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3177 return 0;
3178 }
3179
3180 /*
3181 * An FLR will stop BE from DMAing any data.
3182 */
3183 static void be_shutdown(struct pci_dev *pdev)
3184 {
3185 struct be_adapter *adapter = pci_get_drvdata(pdev);
3186
3187 if (!adapter)
3188 return;
3189
3190 cancel_delayed_work_sync(&adapter->work);
3191
3192 netif_device_detach(adapter->netdev);
3193
3194 be_cmd_reset_function(adapter);
3195
3196 if (adapter->wol)
3197 be_setup_wol(adapter, true);
3198
3199 pci_disable_device(pdev);
3200 }
3201
3202 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3203 pci_channel_state_t state)
3204 {
3205 struct be_adapter *adapter = pci_get_drvdata(pdev);
3206 struct net_device *netdev = adapter->netdev;
3207
3208 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3209
3210 adapter->eeh_err = true;
3211
3212 netif_device_detach(netdev);
3213
3214 if (netif_running(netdev)) {
3215 rtnl_lock();
3216 be_close(netdev);
3217 rtnl_unlock();
3218 }
3219 be_clear(adapter);
3220
3221 if (state == pci_channel_io_perm_failure)
3222 return PCI_ERS_RESULT_DISCONNECT;
3223
3224 pci_disable_device(pdev);
3225
3226 return PCI_ERS_RESULT_NEED_RESET;
3227 }
3228
3229 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3230 {
3231 struct be_adapter *adapter = pci_get_drvdata(pdev);
3232 int status;
3233
3234 dev_info(&adapter->pdev->dev, "EEH reset\n");
3235 adapter->eeh_err = false;
3236
3237 status = pci_enable_device(pdev);
3238 if (status)
3239 return PCI_ERS_RESULT_DISCONNECT;
3240
3241 pci_set_master(pdev);
3242 pci_set_power_state(pdev, 0);
3243 pci_restore_state(pdev);
3244
3245 /* Check if card is ok and fw is ready */
3246 status = be_cmd_POST(adapter);
3247 if (status)
3248 return PCI_ERS_RESULT_DISCONNECT;
3249
3250 return PCI_ERS_RESULT_RECOVERED;
3251 }
3252
3253 static void be_eeh_resume(struct pci_dev *pdev)
3254 {
3255 int status = 0;
3256 struct be_adapter *adapter = pci_get_drvdata(pdev);
3257 struct net_device *netdev = adapter->netdev;
3258
3259 dev_info(&adapter->pdev->dev, "EEH resume\n");
3260
3261 pci_save_state(pdev);
3262
3263 /* tell fw we're ready to fire cmds */
3264 status = be_cmd_fw_init(adapter);
3265 if (status)
3266 goto err;
3267
3268 status = be_setup(adapter);
3269 if (status)
3270 goto err;
3271
3272 if (netif_running(netdev)) {
3273 status = be_open(netdev);
3274 if (status)
3275 goto err;
3276 }
3277 netif_device_attach(netdev);
3278 return;
3279 err:
3280 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3281 }
3282
3283 static struct pci_error_handlers be_eeh_handlers = {
3284 .error_detected = be_eeh_err_detected,
3285 .slot_reset = be_eeh_reset,
3286 .resume = be_eeh_resume,
3287 };
3288
3289 static struct pci_driver be_driver = {
3290 .name = DRV_NAME,
3291 .id_table = be_dev_ids,
3292 .probe = be_probe,
3293 .remove = be_remove,
3294 .suspend = be_suspend,
3295 .resume = be_resume,
3296 .shutdown = be_shutdown,
3297 .err_handler = &be_eeh_handlers
3298 };
3299
3300 static int __init be_init_module(void)
3301 {
3302 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3303 rx_frag_size != 2048) {
3304 printk(KERN_WARNING DRV_NAME
3305 " : Module param rx_frag_size must be 2048/4096/8192."
3306 " Using 2048\n");
3307 rx_frag_size = 2048;
3308 }
3309
3310 return pci_register_driver(&be_driver);
3311 }
3312 module_init(be_init_module);
3313
3314 static void __exit be_exit_module(void)
3315 {
3316 pci_unregister_driver(&be_driver);
3317 }
3318 module_exit(be_exit_module);