drivers/net/benet/be_main.c
1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18#include "be.h"
19#include "be_cmds.h"
20#include <asm/div64.h>
21
22MODULE_VERSION(DRV_VER);
24MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL");
27
28static ushort rx_frag_size = 2048;
29static unsigned int num_vfs;
30module_param(rx_frag_size, ushort, S_IRUGO);
31module_param(num_vfs, uint, S_IRUGO);
32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35static bool multi_rxq = true;
36module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
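/* Typical load-time usage (module name assumed here to be be2net):
 *   modprobe be2net rx_frag_size=4096 num_vfs=2 multi_rxq=1
 * rx_frag_size and num_vfs are read-only after load (S_IRUGO), while
 * multi_rxq can also be flipped later via sysfs (S_IRUGO | S_IWUSR).
 */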
38
39static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { 0 }
46};
47MODULE_DEVICE_TABLE(pci, be_dev_ids);
48/* UE Status Low CSR */
49static char *ue_status_low_desc[] = {
50 "CEV",
51 "CTX",
52 "DBUF",
53 "ERX",
54 "Host",
55 "MPU",
56 "NDMA",
57 "PTC ",
58 "RDMA ",
59 "RXF ",
60 "RXIPS ",
61 "RXULP0 ",
62 "RXULP1 ",
63 "RXULP2 ",
64 "TIM ",
65 "TPOST ",
66 "TPRE ",
67 "TXIPS ",
68 "TXULP0 ",
69 "TXULP1 ",
70 "UC ",
71 "WDMA ",
72 "TXULP2 ",
73 "HOST1 ",
74 "P0_OB_LINK ",
75 "P1_OB_LINK ",
76 "HOST_GPIO ",
77 "MBOX ",
78 "AXGMAC0",
79 "AXGMAC1",
80 "JTAG",
81 "MPU_INTPEND"
82};
83/* UE Status High CSR */
84static char *ue_status_hi_desc[] = {
85 "LPCMEMHOST",
86 "MGMT_MAC",
87 "PCS0ONLINE",
88 "MPU_IRAM",
89 "PCS1ONLINE",
90 "PCTL0",
91 "PCTL1",
92 "PMEM",
93 "RR",
94 "TXPB",
95 "RXPP",
96 "XAUI",
97 "TXP",
98 "ARM",
99 "IPC",
100 "HOST2",
101 "HOST3",
102 "HOST4",
103 "HOST5",
104 "HOST6",
105 "HOST7",
106 "HOST8",
107 "HOST9",
108 "NETC"
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown"
117};
118
119static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
120{
121 struct be_dma_mem *mem = &q->dma_mem;
122 if (mem->va)
123 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
124 mem->dma);
125}
126
127static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
128 u16 len, u16 entry_size)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
131
132 memset(q, 0, sizeof(*q));
133 q->len = len;
134 q->entry_size = entry_size;
135 mem->size = len * entry_size;
136 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
137 GFP_KERNEL);
138 if (!mem->va)
139 return -1;
140 memset(mem->va, 0, mem->size);
141 return 0;
142}
143
144static void be_intr_set(struct be_adapter *adapter, bool enable)
145{
146 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
147 u32 reg = ioread32(addr);
148 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
149
150 if (adapter->eeh_err)
151 return;
152
153 if (!enabled && enable)
154 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 else if (enabled && !enable)
156 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
157 else
158 return;
159
160 iowrite32(reg, addr);
161}
162
163static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
164{
165 u32 val = 0;
166 val |= qid & DB_RQ_RING_ID_MASK;
167 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
168
169 wmb();
170 iowrite32(val, adapter->db + DB_RQ_OFFSET);
171}
172
173static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
174{
175 u32 val = 0;
176 val |= qid & DB_TXULP_RING_ID_MASK;
177 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
178
179 wmb();
180 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
181}
182
183static void be_eq_notify(struct be_adapter *adapter, u16 qid,
184 bool arm, bool clear_int, u16 num_popped)
185{
186 u32 val = 0;
187 val |= qid & DB_EQ_RING_ID_MASK;
188 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
189 DB_EQ_RING_ID_EXT_MASK_SHIFT);
190
191 if (adapter->eeh_err)
192 return;
193
194 if (arm)
195 val |= 1 << DB_EQ_REARM_SHIFT;
196 if (clear_int)
197 val |= 1 << DB_EQ_CLR_SHIFT;
198 val |= 1 << DB_EQ_EVNT_SHIFT;
199 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
200 iowrite32(val, adapter->db + DB_EQ_OFFSET);
201}
202
203void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
204{
205 u32 val = 0;
206 val |= qid & DB_CQ_RING_ID_MASK;
207 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
208 DB_CQ_RING_ID_EXT_MASK_SHIFT);
209
210 if (adapter->eeh_err)
211 return;
212
213 if (arm)
214 val |= 1 << DB_CQ_REARM_SHIFT;
215 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
216 iowrite32(val, adapter->db + DB_CQ_OFFSET);
217}
218
219static int be_mac_addr_set(struct net_device *netdev, void *p)
220{
221 struct be_adapter *adapter = netdev_priv(netdev);
222 struct sockaddr *addr = p;
223 int status = 0;
224
225 if (!is_valid_ether_addr(addr->sa_data))
226 return -EADDRNOTAVAIL;
227
228 /* MAC addr configuration will be done in hardware for VFs
229 * by their corresponding PFs. Just copy to netdev addr here
230 */
231 if (!be_physfn(adapter))
232 goto netdev_addr;
233
234 status = be_cmd_pmac_del(adapter, adapter->if_handle,
235 adapter->pmac_id, 0);
236 if (status)
237 return status;
238
239 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
240 adapter->if_handle, &adapter->pmac_id, 0);
241netdev_addr:
242 if (!status)
243 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
244
245 return status;
246}
247
248void netdev_stats_update(struct be_adapter *adapter)
249{
250 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
251 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
252 struct be_port_rxf_stats *port_stats =
253 &rxf_stats->port[adapter->port_num];
254 struct net_device_stats *dev_stats = &adapter->netdev->stats;
255 struct be_erx_stats *erx_stats = &hw_stats->erx;
256 struct be_rx_obj *rxo;
257 int i;
258
259 memset(dev_stats, 0, sizeof(*dev_stats));
260 for_all_rx_queues(adapter, rxo, i) {
261 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
262 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
263 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
264 /* no space in linux buffers: best possible approximation */
265 dev_stats->rx_dropped +=
266 erx_stats->rx_drops_no_fragments[rxo->q.id];
267 }
268
269 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
270 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
271
272 /* bad pkts received */
273 dev_stats->rx_errors = port_stats->rx_crc_errors +
274 port_stats->rx_alignment_symbol_errors +
275 port_stats->rx_in_range_errors +
276 port_stats->rx_out_range_errors +
277 port_stats->rx_frame_too_long +
278 port_stats->rx_dropped_too_small +
279 port_stats->rx_dropped_too_short +
280 port_stats->rx_dropped_header_too_small +
281 port_stats->rx_dropped_tcp_length +
282 port_stats->rx_dropped_runt +
283 port_stats->rx_tcp_checksum_errs +
284 port_stats->rx_ip_checksum_errs +
285 port_stats->rx_udp_checksum_errs;
286
287 /* detailed rx errors */
288 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
289 port_stats->rx_out_range_errors +
290 port_stats->rx_frame_too_long;
291
292 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
293
294 /* frame alignment errors */
295 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
296
297 /* receiver fifo overrun */
298	/* drops_no_pbuf is not per i/f; it's per BE card */
299 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
300 port_stats->rx_input_fifo_overflow +
301 rxf_stats->rx_drops_no_pbuf;
302}
303
304void be_link_status_update(struct be_adapter *adapter, bool link_up)
305{
306 struct net_device *netdev = adapter->netdev;
307
308 /* If link came up or went down */
309 if (adapter->link_up != link_up) {
310 adapter->link_speed = -1;
311 if (link_up) {
312 netif_carrier_on(netdev);
313 printk(KERN_INFO "%s: Link up\n", netdev->name);
314 } else {
315 netif_carrier_off(netdev);
316 printk(KERN_INFO "%s: Link down\n", netdev->name);
317 }
318 adapter->link_up = link_up;
319 }
320}
321
322/* Update the EQ delay in BE based on the RX frags consumed / sec */
323static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
324{
325 struct be_eq_obj *rx_eq = &rxo->rx_eq;
326 struct be_rx_stats *stats = &rxo->stats;
327 ulong now = jiffies;
328 u32 eqd;
329
330 if (!rx_eq->enable_aic)
331 return;
332
333 /* Wrapped around */
334 if (time_before(now, stats->rx_fps_jiffies)) {
335 stats->rx_fps_jiffies = now;
336 return;
337 }
338
339 /* Update once a second */
340 if ((now - stats->rx_fps_jiffies) < HZ)
341 return;
342
343 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
344 ((now - stats->rx_fps_jiffies) / HZ);
345
346 stats->rx_fps_jiffies = now;
347 stats->prev_rx_frags = stats->rx_frags;
348 eqd = stats->rx_fps / 110000;
349 eqd = eqd << 3;
350 if (eqd > rx_eq->max_eqd)
351 eqd = rx_eq->max_eqd;
352 if (eqd < rx_eq->min_eqd)
353 eqd = rx_eq->min_eqd;
354 if (eqd < 10)
355 eqd = 0;
356 if (eqd != rx_eq->cur_eqd)
357 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
358
359 rx_eq->cur_eqd = eqd;
360}
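/* Worked example of the calculation above: at roughly 440,000 rx frags/sec,
 * eqd = 440000 / 110000 = 4, shifted left by 3 gives 32, which is then
 * clamped to [min_eqd, max_eqd] before being programmed.  At a light load
 * of 110,000 frags/sec the result is 8, and the "eqd < 10" check zeroes it
 * so completions are not delayed when traffic is low.
 */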
361
362static u32 be_calc_rate(u64 bytes, unsigned long ticks)
363{
364 u64 rate = bytes;
365
366 do_div(rate, ticks / HZ);
367 rate <<= 3; /* bytes/sec -> bits/sec */
368	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */
369
370 return rate;
371}
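/* Worked example: 250,000,000 bytes moved over 2*HZ ticks gives
 * 250000000 / 2 = 125,000,000 bytes/sec, << 3 = 1,000,000,000 bits/sec,
 * / 1,000,000 = 1000; i.e. the returned rate is in Mbits/sec.
 */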
372
373static void be_tx_rate_update(struct be_adapter *adapter)
374{
375 struct be_tx_stats *stats = tx_stats(adapter);
376 ulong now = jiffies;
377
378 /* Wrapped around? */
379 if (time_before(now, stats->be_tx_jiffies)) {
380 stats->be_tx_jiffies = now;
381 return;
382 }
383
384 /* Update tx rate once in two seconds */
385 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
386 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
387 - stats->be_tx_bytes_prev,
388 now - stats->be_tx_jiffies);
389 stats->be_tx_jiffies = now;
390 stats->be_tx_bytes_prev = stats->be_tx_bytes;
391 }
392}
393
394static void be_tx_stats_update(struct be_adapter *adapter,
395 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
396{
397 struct be_tx_stats *stats = tx_stats(adapter);
398 stats->be_tx_reqs++;
399 stats->be_tx_wrbs += wrb_cnt;
400 stats->be_tx_bytes += copied;
401 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
402 if (stopped)
403 stats->be_tx_stops++;
404}
405
406/* Determine number of WRB entries needed to xmit data in an skb */
407static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
408 bool *dummy)
409{
410 int cnt = (skb->len > skb->data_len);
411
412 cnt += skb_shinfo(skb)->nr_frags;
413
414 /* to account for hdr wrb */
415 cnt++;
416 if (lancer_chip(adapter) || !(cnt & 1)) {
417 *dummy = false;
418 } else {
419 /* add a dummy to make it an even num */
420 cnt++;
421 *dummy = true;
422 }
423 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
424 return cnt;
425}
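/* Example: an skb with a linear area and two page fragments needs
 * 1 + 2 + 1 (hdr wrb) = 4 WRBs; 4 is already even, so no dummy is added.
 * With a single fragment the count would be 3, and on non-Lancer chips a
 * dummy WRB is appended to round it up to an even 4.
 */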
426
427static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
428{
429 wrb->frag_pa_hi = upper_32_bits(addr);
430 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
431 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
432}
433
434static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
435 struct sk_buff *skb, u32 wrb_cnt, u32 len)
436{
437 u8 vlan_prio = 0;
438 u16 vlan_tag = 0;
439
440 memset(hdr, 0, sizeof(*hdr));
441
442 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
443
444 if (skb_is_gso(skb)) {
445 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
446 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
447 hdr, skb_shinfo(skb)->gso_size);
448 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
449 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
450 if (lancer_chip(adapter) && adapter->sli_family ==
451 LANCER_A0_SLI_FAMILY) {
452 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
453 if (is_tcp_pkt(skb))
454 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
455 tcpcs, hdr, 1);
456 else if (is_udp_pkt(skb))
457 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
458 udpcs, hdr, 1);
459 }
460 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
461 if (is_tcp_pkt(skb))
462 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
463 else if (is_udp_pkt(skb))
464 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
465 }
466
467 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
468 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
469 vlan_tag = vlan_tx_tag_get(skb);
470 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
471 /* If vlan priority provided by OS is NOT in available bmap */
472 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
473 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
474 adapter->recommended_prio;
475 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
476 }
477
478 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
479 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
480 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
481 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
482}
483
484static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
485 bool unmap_single)
486{
487 dma_addr_t dma;
488
489 be_dws_le_to_cpu(wrb, sizeof(*wrb));
490
491 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
492 if (wrb->frag_len) {
493 if (unmap_single)
494 dma_unmap_single(dev, dma, wrb->frag_len,
495 DMA_TO_DEVICE);
496 else
497 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
498 }
499}
500
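/* Builds the WRB chain for one skb: the header WRB first, then one WRB per
 * DMA-mapped piece (linear area, then each page fragment), plus an optional
 * dummy WRB to keep the count even.  On a mapping failure the queue head is
 * rewound, everything mapped so far is unmapped, and 0 is returned.
 */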
501static int make_tx_wrbs(struct be_adapter *adapter,
502 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
503{
504 dma_addr_t busaddr;
505 int i, copied = 0;
506 struct device *dev = &adapter->pdev->dev;
507 struct sk_buff *first_skb = skb;
508 struct be_queue_info *txq = &adapter->tx_obj.q;
509 struct be_eth_wrb *wrb;
510 struct be_eth_hdr_wrb *hdr;
511 bool map_single = false;
512 u16 map_head;
513
514 hdr = queue_head_node(txq);
515 queue_head_inc(txq);
516 map_head = txq->head;
517
518 if (skb->len > skb->data_len) {
519 int len = skb_headlen(skb);
520 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
521 if (dma_mapping_error(dev, busaddr))
522 goto dma_err;
523 map_single = true;
524 wrb = queue_head_node(txq);
525 wrb_fill(wrb, busaddr, len);
526 be_dws_cpu_to_le(wrb, sizeof(*wrb));
527 queue_head_inc(txq);
528 copied += len;
529 }
530
531 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
532 struct skb_frag_struct *frag =
533 &skb_shinfo(skb)->frags[i];
534 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
535 frag->size, DMA_TO_DEVICE);
536 if (dma_mapping_error(dev, busaddr))
537 goto dma_err;
538 wrb = queue_head_node(txq);
539 wrb_fill(wrb, busaddr, frag->size);
540 be_dws_cpu_to_le(wrb, sizeof(*wrb));
541 queue_head_inc(txq);
542 copied += frag->size;
543 }
544
545 if (dummy_wrb) {
546 wrb = queue_head_node(txq);
547 wrb_fill(wrb, 0, 0);
548 be_dws_cpu_to_le(wrb, sizeof(*wrb));
549 queue_head_inc(txq);
550 }
551
552 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
553 be_dws_cpu_to_le(hdr, sizeof(*hdr));
554
555 return copied;
556dma_err:
557 txq->head = map_head;
558 while (copied) {
559 wrb = queue_head_node(txq);
560 unmap_tx_frag(dev, wrb, map_single);
561 map_single = false;
562 copied -= wrb->frag_len;
563 queue_head_inc(txq);
564 }
565 return 0;
566}
567
568static netdev_tx_t be_xmit(struct sk_buff *skb,
569 struct net_device *netdev)
570{
571 struct be_adapter *adapter = netdev_priv(netdev);
572 struct be_tx_obj *tx_obj = &adapter->tx_obj;
573 struct be_queue_info *txq = &tx_obj->q;
574 u32 wrb_cnt = 0, copied = 0;
575 u32 start = txq->head;
576 bool dummy_wrb, stopped = false;
577
578 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
579
580 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
581 if (copied) {
582 /* record the sent skb in the sent_skb table */
583 BUG_ON(tx_obj->sent_skb_list[start]);
584 tx_obj->sent_skb_list[start] = skb;
585
586 /* Ensure txq has space for the next skb; Else stop the queue
587		 * *BEFORE* ringing the tx doorbell, so that we serialize the
588 * tx compls of the current transmit which'll wake up the queue
589 */
590 atomic_add(wrb_cnt, &txq->used);
591 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
592 txq->len) {
593 netif_stop_queue(netdev);
594 stopped = true;
595 }
596
597 be_txq_notify(adapter, txq->id, wrb_cnt);
598
599 be_tx_stats_update(adapter, wrb_cnt, copied,
600 skb_shinfo(skb)->gso_segs, stopped);
601 } else {
602 txq->head = start;
603 dev_kfree_skb_any(skb);
604 }
605 return NETDEV_TX_OK;
606}
607
608static int be_change_mtu(struct net_device *netdev, int new_mtu)
609{
610 struct be_adapter *adapter = netdev_priv(netdev);
611 if (new_mtu < BE_MIN_MTU ||
612 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
613 (ETH_HLEN + ETH_FCS_LEN))) {
614 dev_info(&adapter->pdev->dev,
615 "MTU must be between %d and %d bytes\n",
616 BE_MIN_MTU,
617 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
618 return -EINVAL;
619 }
620 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
621 netdev->mtu, new_mtu);
622 netdev->mtu = new_mtu;
623 return 0;
624}
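/* Example: assuming BE_MIN_MTU is 256 and BE_MAX_JUMBO_FRAME_SIZE is 9018
 * (see be.h for the authoritative values), the accepted MTU range works out
 * to 256..9000 bytes, since ETH_HLEN + ETH_FCS_LEN (14 + 4) is subtracted
 * from the jumbo frame size.
 */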
625
626/*
627 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
628 * If the user configures more, place BE in vlan promiscuous mode.
629 */
630static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
631{
632 u16 vtag[BE_NUM_VLANS_SUPPORTED];
633 u16 ntags = 0, i;
634 int status = 0;
635 u32 if_handle;
636
637 if (vf) {
638 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
639 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
640 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
641 }
642
643 if (adapter->vlans_added <= adapter->max_vlans) {
644 /* Construct VLAN Table to give to HW */
645 for (i = 0; i < VLAN_N_VID; i++) {
646 if (adapter->vlan_tag[i]) {
647 vtag[ntags] = cpu_to_le16(i);
648 ntags++;
649 }
650 }
651 status = be_cmd_vlan_config(adapter, adapter->if_handle,
652 vtag, ntags, 1, 0);
653 } else {
654 status = be_cmd_vlan_config(adapter, adapter->if_handle,
655 NULL, 0, 1, 1);
656 }
657
658 return status;
659}
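/* e.g. once more VLANs have been added than adapter->max_vlans allows, the
 * table path above is skipped and the final be_cmd_vlan_config() call puts
 * the interface into VLAN-promiscuous mode instead of programming a table.
 */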
660
661static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
662{
663 struct be_adapter *adapter = netdev_priv(netdev);
664
665 adapter->vlan_grp = grp;
666}
667
668static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
669{
670 struct be_adapter *adapter = netdev_priv(netdev);
671
672 adapter->vlans_added++;
673 if (!be_physfn(adapter))
674 return;
675
676 adapter->vlan_tag[vid] = 1;
677 if (adapter->vlans_added <= (adapter->max_vlans + 1))
678 be_vid_config(adapter, false, 0);
679}
680
681static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
682{
683 struct be_adapter *adapter = netdev_priv(netdev);
684
685 adapter->vlans_added--;
686 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
687
688 if (!be_physfn(adapter))
689 return;
690
691 adapter->vlan_tag[vid] = 0;
692 if (adapter->vlans_added <= adapter->max_vlans)
693 be_vid_config(adapter, false, 0);
694}
695
696static void be_set_multicast_list(struct net_device *netdev)
697{
698 struct be_adapter *adapter = netdev_priv(netdev);
699
700 if (netdev->flags & IFF_PROMISC) {
701 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
702 adapter->promiscuous = true;
703 goto done;
704 }
705
706 /* BE was previously in promiscuous mode; disable it */
707 if (adapter->promiscuous) {
708 adapter->promiscuous = false;
709 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
710 }
711
712 /* Enable multicast promisc if num configured exceeds what we support */
713 if (netdev->flags & IFF_ALLMULTI ||
714 netdev_mc_count(netdev) > BE_MAX_MC) {
715 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
716 &adapter->mc_cmd_mem);
717 goto done;
718 }
719
720 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
721 &adapter->mc_cmd_mem);
722done:
723 return;
724}
725
726static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
727{
728 struct be_adapter *adapter = netdev_priv(netdev);
729 int status;
730
731 if (!adapter->sriov_enabled)
732 return -EPERM;
733
734 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
735 return -EINVAL;
736
737 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
738 status = be_cmd_pmac_del(adapter,
739 adapter->vf_cfg[vf].vf_if_handle,
740 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
741
742 status = be_cmd_pmac_add(adapter, mac,
743 adapter->vf_cfg[vf].vf_if_handle,
744 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
745
746 if (status)
747 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
748 mac, vf);
749 else
750 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
751
752 return status;
753}
754
755static int be_get_vf_config(struct net_device *netdev, int vf,
756 struct ifla_vf_info *vi)
757{
758 struct be_adapter *adapter = netdev_priv(netdev);
759
760 if (!adapter->sriov_enabled)
761 return -EPERM;
762
763 if (vf >= num_vfs)
764 return -EINVAL;
765
766 vi->vf = vf;
767 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
768 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
769 vi->qos = 0;
770 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
771
772 return 0;
773}
774
775static int be_set_vf_vlan(struct net_device *netdev,
776 int vf, u16 vlan, u8 qos)
777{
778 struct be_adapter *adapter = netdev_priv(netdev);
779 int status = 0;
780
781 if (!adapter->sriov_enabled)
782 return -EPERM;
783
784 if ((vf >= num_vfs) || (vlan > 4095))
785 return -EINVAL;
786
787 if (vlan) {
788 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
789 adapter->vlans_added++;
790 } else {
791 adapter->vf_cfg[vf].vf_vlan_tag = 0;
792 adapter->vlans_added--;
793 }
794
795 status = be_vid_config(adapter, true, vf);
796
797 if (status)
798 dev_info(&adapter->pdev->dev,
799 "VLAN %d config on VF %d failed\n", vlan, vf);
800 return status;
801}
802
803static int be_set_vf_tx_rate(struct net_device *netdev,
804 int vf, int rate)
805{
806 struct be_adapter *adapter = netdev_priv(netdev);
807 int status = 0;
808
809 if (!adapter->sriov_enabled)
810 return -EPERM;
811
812 if ((vf >= num_vfs) || (rate < 0))
813 return -EINVAL;
814
815 if (rate > 10000)
816 rate = 10000;
817
818 adapter->vf_cfg[vf].vf_tx_rate = rate;
819 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
820
821 if (status)
822 dev_info(&adapter->pdev->dev,
823 "tx rate %d on VF %d failed\n", rate, vf);
824 return status;
825}
826
827static void be_rx_rate_update(struct be_rx_obj *rxo)
828{
829 struct be_rx_stats *stats = &rxo->stats;
830 ulong now = jiffies;
831
832 /* Wrapped around */
833 if (time_before(now, stats->rx_jiffies)) {
834 stats->rx_jiffies = now;
835 return;
836 }
837
838 /* Update the rate once in two seconds */
839 if ((now - stats->rx_jiffies) < 2 * HZ)
840 return;
841
842 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
843 now - stats->rx_jiffies);
844 stats->rx_jiffies = now;
845 stats->rx_bytes_prev = stats->rx_bytes;
846}
847
848static void be_rx_stats_update(struct be_rx_obj *rxo,
849 struct be_rx_compl_info *rxcp)
850{
851 struct be_rx_stats *stats = &rxo->stats;
852
853 stats->rx_compl++;
854 stats->rx_frags += rxcp->num_rcvd;
855 stats->rx_bytes += rxcp->pkt_size;
856 stats->rx_pkts++;
857 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
858 stats->rx_mcast_pkts++;
859 if (rxcp->err)
860 stats->rxcp_err++;
861}
862
863static inline bool csum_passed(struct be_rx_compl_info *rxcp)
864{
865 /* L4 checksum is not reliable for non TCP/UDP packets.
866 * Also ignore ipcksm for ipv6 pkts */
867 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
868 (rxcp->ip_csum || rxcp->ipv6);
869}
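/* For example, an IPv6/UDP frame with a good L4 checksum passes even though
 * ipcksm is 0 (there is no IPv4 header checksum to verify), while frames
 * that are neither TCP nor UDP never pass and are left to software checksum
 * verification.
 */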
870
871static struct be_rx_page_info *
872get_rx_page_info(struct be_adapter *adapter,
873 struct be_rx_obj *rxo,
874 u16 frag_idx)
875{
876 struct be_rx_page_info *rx_page_info;
877 struct be_queue_info *rxq = &rxo->q;
878
879 rx_page_info = &rxo->page_info_tbl[frag_idx];
880 BUG_ON(!rx_page_info->page);
881
882 if (rx_page_info->last_page_user) {
883 dma_unmap_page(&adapter->pdev->dev,
884 dma_unmap_addr(rx_page_info, bus),
885 adapter->big_page_size, DMA_FROM_DEVICE);
886 rx_page_info->last_page_user = false;
887 }
888
889 atomic_dec(&rxq->used);
890 return rx_page_info;
891}
892
893/* Throw away the data in the Rx completion */
894static void be_rx_compl_discard(struct be_adapter *adapter,
895 struct be_rx_obj *rxo,
896 struct be_rx_compl_info *rxcp)
897{
898 struct be_queue_info *rxq = &rxo->q;
899 struct be_rx_page_info *page_info;
900 u16 i, num_rcvd = rxcp->num_rcvd;
901
902 for (i = 0; i < num_rcvd; i++) {
903 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
904 put_page(page_info->page);
905 memset(page_info, 0, sizeof(*page_info));
906 index_inc(&rxcp->rxq_idx, rxq->len);
907 }
908}
909
910/*
911 * skb_fill_rx_data forms a complete skb for an ether frame
912 * indicated by rxcp.
913 */
914static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
915 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
916{
917 struct be_queue_info *rxq = &rxo->q;
918 struct be_rx_page_info *page_info;
919 u16 i, j;
920 u16 hdr_len, curr_frag_len, remaining;
921 u8 *start;
922
923 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
924 start = page_address(page_info->page) + page_info->page_offset;
925 prefetch(start);
926
927 /* Copy data in the first descriptor of this completion */
928 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
929
930 /* Copy the header portion into skb_data */
931 hdr_len = min(BE_HDR_LEN, curr_frag_len);
932 memcpy(skb->data, start, hdr_len);
933 skb->len = curr_frag_len;
934 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
935 /* Complete packet has now been moved to data */
936 put_page(page_info->page);
937 skb->data_len = 0;
938 skb->tail += curr_frag_len;
939 } else {
940 skb_shinfo(skb)->nr_frags = 1;
941 skb_shinfo(skb)->frags[0].page = page_info->page;
942 skb_shinfo(skb)->frags[0].page_offset =
943 page_info->page_offset + hdr_len;
944 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
945 skb->data_len = curr_frag_len - hdr_len;
946 skb->tail += hdr_len;
947 }
948 page_info->page = NULL;
949
950 if (rxcp->pkt_size <= rx_frag_size) {
951 BUG_ON(rxcp->num_rcvd != 1);
952 return;
953 }
954
955 /* More frags present for this completion */
956 index_inc(&rxcp->rxq_idx, rxq->len);
957 remaining = rxcp->pkt_size - curr_frag_len;
958 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
959 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
960 curr_frag_len = min(remaining, rx_frag_size);
961
962 /* Coalesce all frags from the same physical page in one slot */
963 if (page_info->page_offset == 0) {
964 /* Fresh page */
965 j++;
966 skb_shinfo(skb)->frags[j].page = page_info->page;
967 skb_shinfo(skb)->frags[j].page_offset =
968 page_info->page_offset;
969 skb_shinfo(skb)->frags[j].size = 0;
970 skb_shinfo(skb)->nr_frags++;
971 } else {
972 put_page(page_info->page);
973 }
974
975 skb_shinfo(skb)->frags[j].size += curr_frag_len;
976 skb->len += curr_frag_len;
977 skb->data_len += curr_frag_len;
978
979 remaining -= curr_frag_len;
980 index_inc(&rxcp->rxq_idx, rxq->len);
981 page_info->page = NULL;
982 }
983 BUG_ON(j > MAX_SKB_FRAGS);
984}
985
986/* Process the RX completion indicated by rxcp when GRO is disabled */
987static void be_rx_compl_process(struct be_adapter *adapter,
988 struct be_rx_obj *rxo,
989 struct be_rx_compl_info *rxcp)
990{
991 struct net_device *netdev = adapter->netdev;
992 struct sk_buff *skb;
993
994 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
995 if (unlikely(!skb)) {
996 if (net_ratelimit())
997 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
998 be_rx_compl_discard(adapter, rxo, rxcp);
999 return;
1000 }
1001
1002 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1003
1004 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1005 skb->ip_summed = CHECKSUM_UNNECESSARY;
1006 else
1007 skb_checksum_none_assert(skb);
1008
1009 skb->truesize = skb->len + sizeof(struct sk_buff);
1010 skb->protocol = eth_type_trans(skb, netdev);
1011 if (adapter->netdev->features & NETIF_F_RXHASH)
1012 skb->rxhash = rxcp->rss_hash;
1013
1014
1015 if (unlikely(rxcp->vlanf)) {
1016 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1017 kfree_skb(skb);
1018 return;
1019 }
1020 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1021 rxcp->vlan_tag);
1022 } else {
1023 netif_receive_skb(skb);
1024 }
1025}
1026
1027/* Process the RX completion indicated by rxcp when GRO is enabled */
1028static void be_rx_compl_process_gro(struct be_adapter *adapter,
1029 struct be_rx_obj *rxo,
1030 struct be_rx_compl_info *rxcp)
1031{
1032 struct be_rx_page_info *page_info;
1033 struct sk_buff *skb = NULL;
1034 struct be_queue_info *rxq = &rxo->q;
1035 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1036 u16 remaining, curr_frag_len;
1037 u16 i, j;
1038
1039 skb = napi_get_frags(&eq_obj->napi);
1040 if (!skb) {
1041 be_rx_compl_discard(adapter, rxo, rxcp);
1042 return;
1043 }
1044
1045 remaining = rxcp->pkt_size;
1046 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1047 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1048
1049 curr_frag_len = min(remaining, rx_frag_size);
1050
1051 /* Coalesce all frags from the same physical page in one slot */
1052 if (i == 0 || page_info->page_offset == 0) {
1053 /* First frag or Fresh page */
1054 j++;
1055 skb_shinfo(skb)->frags[j].page = page_info->page;
1056 skb_shinfo(skb)->frags[j].page_offset =
1057 page_info->page_offset;
1058 skb_shinfo(skb)->frags[j].size = 0;
1059 } else {
1060 put_page(page_info->page);
1061 }
1062 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1063
1064 remaining -= curr_frag_len;
1065 index_inc(&rxcp->rxq_idx, rxq->len);
1066 memset(page_info, 0, sizeof(*page_info));
1067 }
1068 BUG_ON(j > MAX_SKB_FRAGS);
1069
1070 skb_shinfo(skb)->nr_frags = j + 1;
1071 skb->len = rxcp->pkt_size;
1072 skb->data_len = rxcp->pkt_size;
1073 skb->truesize += rxcp->pkt_size;
1074 skb->ip_summed = CHECKSUM_UNNECESSARY;
1075 if (adapter->netdev->features & NETIF_F_RXHASH)
1076 skb->rxhash = rxcp->rss_hash;
1077
1078 if (likely(!rxcp->vlanf))
1079 napi_gro_frags(&eq_obj->napi);
1080 else
1081 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
1082 rxcp->vlan_tag);
1083}
1084
1085static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1086 struct be_eth_rx_compl *compl,
1087 struct be_rx_compl_info *rxcp)
1088{
1089 rxcp->pkt_size =
1090 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1091 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1092 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1093 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1094 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1095 rxcp->ip_csum =
1096 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1097 rxcp->l4_csum =
1098 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1099 rxcp->ipv6 =
1100 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1101 rxcp->rxq_idx =
1102 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1103 rxcp->num_rcvd =
1104 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1105 rxcp->pkt_type =
1106 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1107 rxcp->rss_hash =
1108		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1109 if (rxcp->vlanf) {
1110 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1111 compl);
1112 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1113 compl);
1114 }
1115}
1116
1117static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1118 struct be_eth_rx_compl *compl,
1119 struct be_rx_compl_info *rxcp)
1120{
1121 rxcp->pkt_size =
1122 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1123 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1124 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1125 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1126 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1127 rxcp->ip_csum =
1128 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1129 rxcp->l4_csum =
1130 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1131 rxcp->ipv6 =
1132 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1133 rxcp->rxq_idx =
1134 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1135 rxcp->num_rcvd =
1136 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1137 rxcp->pkt_type =
1138 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1139 rxcp->rss_hash =
1140		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1141 if (rxcp->vlanf) {
1142 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1143 compl);
1144 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1145 compl);
1146 }
1147}
1148
1149static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1150{
1151 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1152 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1153 struct be_adapter *adapter = rxo->adapter;
1154
1155 /* For checking the valid bit it is Ok to use either definition as the
1156 * valid bit is at the same position in both v0 and v1 Rx compl */
1157 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1158 return NULL;
1159
1160 rmb();
1161 be_dws_le_to_cpu(compl, sizeof(*compl));
1162
1163 if (adapter->be3_native)
1164 be_parse_rx_compl_v1(adapter, compl, rxcp);
1165 else
1166 be_parse_rx_compl_v0(adapter, compl, rxcp);
1167
1168 if (rxcp->vlanf) {
1169 /* vlanf could be wrongly set in some cards.
1170 * ignore if vtm is not set */
1171 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1172 rxcp->vlanf = 0;
1173
1174 if (!lancer_chip(adapter))
1175 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1176
1177 if (((adapter->pvid & VLAN_VID_MASK) ==
1178 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1179 !adapter->vlan_tag[rxcp->vlan_tag])
1180 rxcp->vlanf = 0;
1181 }
1182
1183	/* As the compl has been parsed, reset it; we won't touch it again */
1184 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1185
1186 queue_tail_inc(&rxo->cq);
1187 return rxcp;
1188}
1189
1190static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1191{
1192 u32 order = get_order(size);
1193
1194 if (order > 0)
1195 gfp |= __GFP_COMP;
1196 return alloc_pages(gfp, order);
1197}
1198
1199/*
1200 * Allocate a page, split it into fragments of size rx_frag_size and post as
1201 * receive buffers to BE
1202 */
1203static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1204{
1205 struct be_adapter *adapter = rxo->adapter;
1206 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1207 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1208 struct be_queue_info *rxq = &rxo->q;
1209 struct page *pagep = NULL;
1210 struct be_eth_rx_d *rxd;
1211 u64 page_dmaaddr = 0, frag_dmaaddr;
1212 u32 posted, page_offset = 0;
1213
1214 page_info = &rxo->page_info_tbl[rxq->head];
1215 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1216 if (!pagep) {
1217 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1218 if (unlikely(!pagep)) {
1219 rxo->stats.rx_post_fail++;
1220 break;
1221 }
1222 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1223 0, adapter->big_page_size,
1224 DMA_FROM_DEVICE);
1225 page_info->page_offset = 0;
1226 } else {
1227 get_page(pagep);
1228 page_info->page_offset = page_offset + rx_frag_size;
1229 }
1230 page_offset = page_info->page_offset;
1231 page_info->page = pagep;
1232 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1233 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1234
1235 rxd = queue_head_node(rxq);
1236 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1237 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1238
1239 /* Any space left in the current big page for another frag? */
1240 if ((page_offset + rx_frag_size + rx_frag_size) >
1241 adapter->big_page_size) {
1242 pagep = NULL;
1243 page_info->last_page_user = true;
1244 }
1245
1246 prev_page_info = page_info;
1247 queue_head_inc(rxq);
1248 page_info = &page_info_tbl[rxq->head];
1249 }
1250 if (pagep)
1251 prev_page_info->last_page_user = true;
1252
1253 if (posted) {
1254 atomic_add(posted, &rxq->used);
1255 be_rxq_notify(adapter, rxq->id, posted);
1256 } else if (atomic_read(&rxq->used) == 0) {
1257 /* Let be_worker replenish when memory is available */
1258 rxo->rx_post_starved = true;
1259 }
1260}
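/* Example: with the default rx_frag_size of 2048 and 4K pages,
 * big_page_size (computed in be_rx_queues_create()) is 4096, so each page
 * is carved into two fragments; the last fragment from a page is marked
 * last_page_user so the page is DMA-unmapped only once in
 * get_rx_page_info().
 */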
1261
1262static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1263{
1264 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1265
1266 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1267 return NULL;
1268
1269 rmb();
1270 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1271
1272 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1273
1274 queue_tail_inc(tx_cq);
1275 return txcp;
1276}
1277
1278static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1279{
1280 struct be_queue_info *txq = &adapter->tx_obj.q;
1281 struct be_eth_wrb *wrb;
1282 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1283 struct sk_buff *sent_skb;
1284 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1285 bool unmap_skb_hdr = true;
1286
1287 sent_skb = sent_skbs[txq->tail];
1288 BUG_ON(!sent_skb);
1289 sent_skbs[txq->tail] = NULL;
1290
1291 /* skip header wrb */
1292 queue_tail_inc(txq);
1293
1294 do {
1295 cur_index = txq->tail;
1296 wrb = queue_tail_node(txq);
1297 unmap_tx_frag(&adapter->pdev->dev, wrb,
1298 (unmap_skb_hdr && skb_headlen(sent_skb)));
1299 unmap_skb_hdr = false;
1300
1301 num_wrbs++;
1302 queue_tail_inc(txq);
1303 } while (cur_index != last_index);
1304
1305 atomic_sub(num_wrbs, &txq->used);
1306
1307 kfree_skb(sent_skb);
1308}
1309
1310static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1311{
1312 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1313
1314 if (!eqe->evt)
1315 return NULL;
1316
1317 rmb();
1318 eqe->evt = le32_to_cpu(eqe->evt);
1319 queue_tail_inc(&eq_obj->q);
1320 return eqe;
1321}
1322
1323static int event_handle(struct be_adapter *adapter,
1324 struct be_eq_obj *eq_obj)
1325{
1326 struct be_eq_entry *eqe;
1327 u16 num = 0;
1328
1329 while ((eqe = event_get(eq_obj)) != NULL) {
1330 eqe->evt = 0;
1331 num++;
1332 }
1333
1334 /* Deal with any spurious interrupts that come
1335 * without events
1336 */
1337 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1338 if (num)
1339 napi_schedule(&eq_obj->napi);
1340
1341 return num;
1342}
1343
1344/* Just read and notify events without processing them.
1345 * Used at the time of destroying event queues */
1346static void be_eq_clean(struct be_adapter *adapter,
1347 struct be_eq_obj *eq_obj)
1348{
1349 struct be_eq_entry *eqe;
1350 u16 num = 0;
1351
1352 while ((eqe = event_get(eq_obj)) != NULL) {
1353 eqe->evt = 0;
1354 num++;
1355 }
1356
1357 if (num)
1358 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1359}
1360
1361static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1362{
1363 struct be_rx_page_info *page_info;
1364 struct be_queue_info *rxq = &rxo->q;
1365 struct be_queue_info *rx_cq = &rxo->cq;
1366 struct be_rx_compl_info *rxcp;
1367 u16 tail;
1368
1369 /* First cleanup pending rx completions */
1370 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1371 be_rx_compl_discard(adapter, rxo, rxcp);
1372 be_cq_notify(adapter, rx_cq->id, false, 1);
1373 }
1374
1375	/* Then free posted rx buffers that were not used */
1376 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1377 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1378 page_info = get_rx_page_info(adapter, rxo, tail);
1379 put_page(page_info->page);
1380 memset(page_info, 0, sizeof(*page_info));
1381 }
1382 BUG_ON(atomic_read(&rxq->used));
1383}
1384
1385static void be_tx_compl_clean(struct be_adapter *adapter)
1386{
1387 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1388 struct be_queue_info *txq = &adapter->tx_obj.q;
1389 struct be_eth_tx_compl *txcp;
1390 u16 end_idx, cmpl = 0, timeo = 0;
1391 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1392 struct sk_buff *sent_skb;
1393 bool dummy_wrb;
1394
1395 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1396 do {
1397 while ((txcp = be_tx_compl_get(tx_cq))) {
1398 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1399 wrb_index, txcp);
1400 be_tx_compl_process(adapter, end_idx);
1401 cmpl++;
1402 }
1403 if (cmpl) {
1404 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1405 cmpl = 0;
1406 }
1407
1408 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1409 break;
1410
1411 mdelay(1);
1412 } while (true);
1413
1414 if (atomic_read(&txq->used))
1415 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1416 atomic_read(&txq->used));
1417
1418 /* free posted tx for which compls will never arrive */
1419 while (atomic_read(&txq->used)) {
1420 sent_skb = sent_skbs[txq->tail];
1421 end_idx = txq->tail;
1422 index_adv(&end_idx,
1423 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1424 txq->len);
1425 be_tx_compl_process(adapter, end_idx);
1426 }
1427}
1428
1429static void be_mcc_queues_destroy(struct be_adapter *adapter)
1430{
1431 struct be_queue_info *q;
1432
1433 q = &adapter->mcc_obj.q;
1434 if (q->created)
1435 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1436 be_queue_free(adapter, q);
1437
1438 q = &adapter->mcc_obj.cq;
1439 if (q->created)
1440 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1441 be_queue_free(adapter, q);
1442}
1443
1444/* Must be called only after TX qs are created as MCC shares TX EQ */
1445static int be_mcc_queues_create(struct be_adapter *adapter)
1446{
1447 struct be_queue_info *q, *cq;
1448
1449 /* Alloc MCC compl queue */
1450 cq = &adapter->mcc_obj.cq;
1451 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1452 sizeof(struct be_mcc_compl)))
1453 goto err;
1454
1455 /* Ask BE to create MCC compl queue; share TX's eq */
1456 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1457 goto mcc_cq_free;
1458
1459 /* Alloc MCC queue */
1460 q = &adapter->mcc_obj.q;
1461 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1462 goto mcc_cq_destroy;
1463
1464 /* Ask BE to create MCC queue */
1465 if (be_cmd_mccq_create(adapter, q, cq))
1466 goto mcc_q_free;
1467
1468 return 0;
1469
1470mcc_q_free:
1471 be_queue_free(adapter, q);
1472mcc_cq_destroy:
1473 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1474mcc_cq_free:
1475 be_queue_free(adapter, cq);
1476err:
1477 return -1;
1478}
1479
1480static void be_tx_queues_destroy(struct be_adapter *adapter)
1481{
1482 struct be_queue_info *q;
1483
1484 q = &adapter->tx_obj.q;
1485 if (q->created)
1486 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1487 be_queue_free(adapter, q);
1488
1489 q = &adapter->tx_obj.cq;
1490 if (q->created)
1491 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1492 be_queue_free(adapter, q);
1493
1494 /* Clear any residual events */
1495 be_eq_clean(adapter, &adapter->tx_eq);
1496
1497 q = &adapter->tx_eq.q;
1498 if (q->created)
1499 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1500 be_queue_free(adapter, q);
1501}
1502
1503static int be_tx_queues_create(struct be_adapter *adapter)
1504{
1505 struct be_queue_info *eq, *q, *cq;
1506
1507 adapter->tx_eq.max_eqd = 0;
1508 adapter->tx_eq.min_eqd = 0;
1509 adapter->tx_eq.cur_eqd = 96;
1510 adapter->tx_eq.enable_aic = false;
1511 /* Alloc Tx Event queue */
1512 eq = &adapter->tx_eq.q;
1513 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1514 return -1;
1515
1516 /* Ask BE to create Tx Event queue */
1517 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1518 goto tx_eq_free;
1519
1520 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1521
1522
1523 /* Alloc TX eth compl queue */
1524 cq = &adapter->tx_obj.cq;
1525 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1526 sizeof(struct be_eth_tx_compl)))
1527 goto tx_eq_destroy;
1528
1529 /* Ask BE to create Tx eth compl queue */
1530 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1531 goto tx_cq_free;
1532
1533 /* Alloc TX eth queue */
1534 q = &adapter->tx_obj.q;
1535 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1536 goto tx_cq_destroy;
1537
1538 /* Ask BE to create Tx eth queue */
1539 if (be_cmd_txq_create(adapter, q, cq))
1540 goto tx_q_free;
1541 return 0;
1542
1543tx_q_free:
1544 be_queue_free(adapter, q);
1545tx_cq_destroy:
1546 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1547tx_cq_free:
1548 be_queue_free(adapter, cq);
1549tx_eq_destroy:
1550 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1551tx_eq_free:
1552 be_queue_free(adapter, eq);
1553 return -1;
1554}
1555
1556static void be_rx_queues_destroy(struct be_adapter *adapter)
1557{
1558 struct be_queue_info *q;
1559 struct be_rx_obj *rxo;
1560 int i;
1561
1562 for_all_rx_queues(adapter, rxo, i) {
1563 q = &rxo->q;
1564 if (q->created) {
1565 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1566 /* After the rxq is invalidated, wait for a grace time
1567 * of 1ms for all dma to end and the flush compl to
1568 * arrive
1569 */
1570 mdelay(1);
1571 be_rx_q_clean(adapter, rxo);
1572 }
1573 be_queue_free(adapter, q);
1574
1575 q = &rxo->cq;
1576 if (q->created)
1577 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1578 be_queue_free(adapter, q);
1579
1580 /* Clear any residual events */
1581 q = &rxo->rx_eq.q;
1582 if (q->created) {
1583 be_eq_clean(adapter, &rxo->rx_eq);
1584 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1585 }
1586 be_queue_free(adapter, q);
1587 }
1588}
1589
1590static u32 be_num_rxqs_want(struct be_adapter *adapter)
1591{
1592 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1593 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1594 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1595 } else {
1596 dev_warn(&adapter->pdev->dev,
1597 "No support for multiple RX queues\n");
1598 return 1;
1599 }
1600}
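/* In other words: a function with RSS capability, SR-IOV disabled, the
 * 0x400 function-mode bit clear and multi_rxq left enabled asks for one
 * default queue plus MAX_RSS_QS RSS queues; every other case is limited to
 * a single RX queue.
 */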
1601
1602static int be_rx_queues_create(struct be_adapter *adapter)
1603{
1604 struct be_queue_info *eq, *q, *cq;
1605 struct be_rx_obj *rxo;
1606 int rc, i;
1607
1608 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1609 msix_enabled(adapter) ?
1610 adapter->num_msix_vec - 1 : 1);
1611 if (adapter->num_rx_qs != MAX_RX_QS)
1612 dev_warn(&adapter->pdev->dev,
1613 "Can create only %d RX queues", adapter->num_rx_qs);
1614
1615 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1616 for_all_rx_queues(adapter, rxo, i) {
1617 rxo->adapter = adapter;
1618 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1619 rxo->rx_eq.enable_aic = true;
1620
1621 /* EQ */
1622 eq = &rxo->rx_eq.q;
1623 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1624 sizeof(struct be_eq_entry));
1625 if (rc)
1626 goto err;
1627
1628 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1629 if (rc)
1630 goto err;
1631
1632 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1633
1634 /* CQ */
1635 cq = &rxo->cq;
1636 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1637 sizeof(struct be_eth_rx_compl));
1638 if (rc)
1639 goto err;
1640
1641 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1642 if (rc)
1643 goto err;
1644 /* Rx Q */
1645 q = &rxo->q;
1646 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1647 sizeof(struct be_eth_rx_d));
1648 if (rc)
1649 goto err;
1650
1651 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1652 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1653 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1654 if (rc)
1655 goto err;
1656 }
1657
1658 if (be_multi_rxq(adapter)) {
1659 u8 rsstable[MAX_RSS_QS];
1660
1661 for_all_rss_queues(adapter, rxo, i)
1662 rsstable[i] = rxo->rss_id;
1663
1664 rc = be_cmd_rss_config(adapter, rsstable,
1665 adapter->num_rx_qs - 1);
1666 if (rc)
1667 goto err;
1668 }
1669
1670 return 0;
1671err:
1672 be_rx_queues_destroy(adapter);
1673 return -1;
1674}
1675
1676static bool event_peek(struct be_eq_obj *eq_obj)
1677{
1678 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1679 if (!eqe->evt)
1680 return false;
1681 else
1682 return true;
1683}
1684
1685static irqreturn_t be_intx(int irq, void *dev)
1686{
1687 struct be_adapter *adapter = dev;
1688 struct be_rx_obj *rxo;
1689 int isr, i, tx = 0 , rx = 0;
1690
1691 if (lancer_chip(adapter)) {
1692 if (event_peek(&adapter->tx_eq))
1693 tx = event_handle(adapter, &adapter->tx_eq);
1694 for_all_rx_queues(adapter, rxo, i) {
1695 if (event_peek(&rxo->rx_eq))
1696 rx |= event_handle(adapter, &rxo->rx_eq);
1697 }
1698
1699 if (!(tx || rx))
1700 return IRQ_NONE;
1701
1702 } else {
1703 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1704 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1705 if (!isr)
1706 return IRQ_NONE;
1707
1708 if ((1 << adapter->tx_eq.eq_idx & isr))
1709 event_handle(adapter, &adapter->tx_eq);
1710
1711 for_all_rx_queues(adapter, rxo, i) {
1712 if ((1 << rxo->rx_eq.eq_idx & isr))
1713 event_handle(adapter, &rxo->rx_eq);
1714 }
1715 }
1716
1717 return IRQ_HANDLED;
1718}
1719
1720static irqreturn_t be_msix_rx(int irq, void *dev)
1721{
1722 struct be_rx_obj *rxo = dev;
1723 struct be_adapter *adapter = rxo->adapter;
1724
1725 event_handle(adapter, &rxo->rx_eq);
1726
1727 return IRQ_HANDLED;
1728}
1729
1730static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1731{
1732 struct be_adapter *adapter = dev;
1733
1734 event_handle(adapter, &adapter->tx_eq);
1735
1736 return IRQ_HANDLED;
1737}
1738
1739static inline bool do_gro(struct be_rx_compl_info *rxcp)
1740{
1741	return rxcp->tcpf && !rxcp->err;
1742}
1743
1744static int be_poll_rx(struct napi_struct *napi, int budget)
1745{
1746 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1747 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1748 struct be_adapter *adapter = rxo->adapter;
1749 struct be_queue_info *rx_cq = &rxo->cq;
1750 struct be_rx_compl_info *rxcp;
1751 u32 work_done;
1752
1753 rxo->stats.rx_polls++;
1754 for (work_done = 0; work_done < budget; work_done++) {
1755 rxcp = be_rx_compl_get(rxo);
1756 if (!rxcp)
1757 break;
1758
1759 /* Ignore flush completions */
1760 if (rxcp->num_rcvd) {
1761 if (do_gro(rxcp))
1762 be_rx_compl_process_gro(adapter, rxo, rxcp);
1763 else
1764 be_rx_compl_process(adapter, rxo, rxcp);
1765 }
1766 be_rx_stats_update(rxo, rxcp);
1767 }
1768
1769 /* Refill the queue */
1770 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1771 be_post_rx_frags(rxo, GFP_ATOMIC);
1772
1773 /* All consumed */
1774 if (work_done < budget) {
1775 napi_complete(napi);
1776 be_cq_notify(adapter, rx_cq->id, true, work_done);
1777 } else {
1778 /* More to be consumed; continue with interrupts disabled */
1779 be_cq_notify(adapter, rx_cq->id, false, work_done);
1780 }
1781 return work_done;
1782}
1783
1784/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1785 * For TX/MCC we don't honour budget; consume everything
1786 */
1787static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1788{
1789 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1790 struct be_adapter *adapter =
1791 container_of(tx_eq, struct be_adapter, tx_eq);
1792 struct be_queue_info *txq = &adapter->tx_obj.q;
1793 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1794 struct be_eth_tx_compl *txcp;
1795 int tx_compl = 0, mcc_compl, status = 0;
1796 u16 end_idx;
1797
1798 while ((txcp = be_tx_compl_get(tx_cq))) {
1799 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1800 wrb_index, txcp);
1801 be_tx_compl_process(adapter, end_idx);
1802 tx_compl++;
1803 }
1804
1805 mcc_compl = be_process_mcc(adapter, &status);
1806
1807 napi_complete(napi);
1808
1809 if (mcc_compl) {
1810 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1811 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1812 }
1813
1814 if (tx_compl) {
1815 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1816
1817 /* As Tx wrbs have been freed up, wake up netdev queue if
1818 * it was stopped due to lack of tx wrbs.
1819 */
1820 if (netif_queue_stopped(adapter->netdev) &&
1821 atomic_read(&txq->used) < txq->len / 2) {
1822 netif_wake_queue(adapter->netdev);
1823 }
1824
1825 tx_stats(adapter)->be_tx_events++;
1826 tx_stats(adapter)->be_tx_compl += tx_compl;
1827 }
1828
1829 return 1;
1830}
1831
1832void be_detect_dump_ue(struct be_adapter *adapter)
1833{
1834 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1835 u32 i;
1836
1837 pci_read_config_dword(adapter->pdev,
1838 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1839 pci_read_config_dword(adapter->pdev,
1840 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1841 pci_read_config_dword(adapter->pdev,
1842 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1843 pci_read_config_dword(adapter->pdev,
1844 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1845
1846 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1847 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1848
1849 if (ue_status_lo || ue_status_hi) {
1850 adapter->ue_detected = true;
1851 adapter->eeh_err = true;
1852 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1853 }
1854
1855 if (ue_status_lo) {
1856 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1857 if (ue_status_lo & 1)
1858 dev_err(&adapter->pdev->dev,
1859 "UE: %s bit set\n", ue_status_low_desc[i]);
1860 }
1861 }
1862 if (ue_status_hi) {
1863 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1864 if (ue_status_hi & 1)
1865 dev_err(&adapter->pdev->dev,
1866 "UE: %s bit set\n", ue_status_hi_desc[i]);
1867 }
1868 }
1869
1870}
1871
1872static void be_worker(struct work_struct *work)
1873{
1874 struct be_adapter *adapter =
1875 container_of(work, struct be_adapter, work.work);
1876 struct be_rx_obj *rxo;
1877 int i;
1878
1879 if (!adapter->ue_detected && !lancer_chip(adapter))
1880 be_detect_dump_ue(adapter);
1881
1882 /* when interrupts are not yet enabled, just reap any pending
1883 * mcc completions */
1884 if (!netif_running(adapter->netdev)) {
1885 int mcc_compl, status = 0;
1886
1887 mcc_compl = be_process_mcc(adapter, &status);
1888
1889 if (mcc_compl) {
1890 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1891 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1892 }
1893
1894 goto reschedule;
1895 }
1896
1897 if (!adapter->stats_cmd_sent)
1898 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1899
1900 be_tx_rate_update(adapter);
1901
1902 for_all_rx_queues(adapter, rxo, i) {
1903 be_rx_rate_update(rxo);
1904 be_rx_eqd_update(adapter, rxo);
1905
1906 if (rxo->rx_post_starved) {
1907 rxo->rx_post_starved = false;
1908 be_post_rx_frags(rxo, GFP_KERNEL);
1909 }
1910 }
1911
1912reschedule:
1913 adapter->work_counter++;
1914 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1915}
1916
1917static void be_msix_disable(struct be_adapter *adapter)
1918{
1919 if (msix_enabled(adapter)) {
1920 pci_disable_msix(adapter->pdev);
1921 adapter->num_msix_vec = 0;
1922 }
1923}
1924
1925static void be_msix_enable(struct be_adapter *adapter)
1926{
1927#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
1928 int i, status, num_vec;
1929
1930 num_vec = be_num_rxqs_want(adapter) + 1;
1931
1932 for (i = 0; i < num_vec; i++)
1933 adapter->msix_entries[i].entry = i;
1934
1935 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
1936 if (status == 0) {
1937 goto done;
1938 } else if (status >= BE_MIN_MSIX_VECTORS) {
1939 num_vec = status;
1940 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1941 num_vec) == 0)
1942 goto done;
1943 }
1944 return;
1945done:
1946 adapter->num_msix_vec = num_vec;
1947 return;
1948}
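/* pci_enable_msix() either succeeds (returns 0) or returns the number of
 * vectors that could actually be allocated; if that count is still at least
 * BE_MIN_MSIX_VECTORS the request is retried with the smaller number,
 * otherwise MSI-X is left disabled and be_irq_register() later falls back
 * to INTx.
 */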
1949
1950static void be_sriov_enable(struct be_adapter *adapter)
1951{
1952 be_check_sriov_fn_type(adapter);
1953#ifdef CONFIG_PCI_IOV
1954 if (be_physfn(adapter) && num_vfs) {
1955 int status, pos;
1956 u16 nvfs;
1957
1958 pos = pci_find_ext_capability(adapter->pdev,
1959 PCI_EXT_CAP_ID_SRIOV);
1960 pci_read_config_word(adapter->pdev,
1961 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
1962
1963 if (num_vfs > nvfs) {
1964 dev_info(&adapter->pdev->dev,
1965 "Device supports %d VFs and not %d\n",
1966 nvfs, num_vfs);
1967 num_vfs = nvfs;
1968 }
1969
1970 status = pci_enable_sriov(adapter->pdev, num_vfs);
1971 adapter->sriov_enabled = status ? false : true;
1972 }
1973#endif
1974}
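/*
 * Example of the clamping above (illustrative numbers): if the SR-IOV
 * capability reports a TotalVFs of 8 and the module was loaded with
 * num_vfs=16, the mismatch is logged and only 8 VFs are enabled via
 * pci_enable_sriov().
 */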
1975
1976static void be_sriov_disable(struct be_adapter *adapter)
1977{
1978#ifdef CONFIG_PCI_IOV
1979 if (adapter->sriov_enabled) {
1980 pci_disable_sriov(adapter->pdev);
1981 adapter->sriov_enabled = false;
1982 }
1983#endif
1984}
1985
1986static inline int be_msix_vec_get(struct be_adapter *adapter,
1987 struct be_eq_obj *eq_obj)
1988{
1989 return adapter->msix_entries[eq_obj->eq_idx].vector;
1990}
1991
1992static int be_request_irq(struct be_adapter *adapter,
1993 struct be_eq_obj *eq_obj,
1994 void *handler, char *desc, void *context)
1995{
1996 struct net_device *netdev = adapter->netdev;
1997 int vec;
1998
1999 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2000 vec = be_msix_vec_get(adapter, eq_obj);
2001 return request_irq(vec, handler, 0, eq_obj->desc, context);
2002}
2003
2004static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2005 void *context)
2006{
2007 int vec = be_msix_vec_get(adapter, eq_obj);
2008 free_irq(vec, context);
2009}
2010
2011static int be_msix_register(struct be_adapter *adapter)
2012{
2013 struct be_rx_obj *rxo;
2014 int status, i;
2015 char qname[10];
2016
2017 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2018 adapter);
2019 if (status)
2020 goto err;
2021
2022 for_all_rx_queues(adapter, rxo, i) {
2023 sprintf(qname, "rxq%d", i);
2024 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2025 qname, rxo);
2026 if (status)
2027 goto err_msix;
2028 }
2029
2030 return 0;
2031
2032err_msix:
2033 be_free_irq(adapter, &adapter->tx_eq, adapter);
2034
2035 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2036 be_free_irq(adapter, &rxo->rx_eq, rxo);
2037
2038err:
2039 dev_warn(&adapter->pdev->dev,
2040 "MSI-X request IRQ failed - err %d\n", status);
2041 be_msix_disable(adapter);
2042 return status;
2043}
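/*
 * With MSI-X enabled, the vector names built above appear in
 * /proc/interrupts as "<ifname>-tx" and "<ifname>-rxq<N>", e.g. for a
 * (hypothetical) interface eth0 with two RX queues:
 *
 *	eth0-tx
 *	eth0-rxq0
 *	eth0-rxq1
 */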
2044
2045static int be_irq_register(struct be_adapter *adapter)
2046{
2047 struct net_device *netdev = adapter->netdev;
2048 int status;
2049
2050 if (msix_enabled(adapter)) {
2051 status = be_msix_register(adapter);
2052 if (status == 0)
2053 goto done;
2054 /* INTx is not supported for VF */
2055 if (!be_physfn(adapter))
2056 return status;
2057 }
2058
2059 /* INTx */
2060 netdev->irq = adapter->pdev->irq;
2061 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2062 adapter);
2063 if (status) {
2064 dev_err(&adapter->pdev->dev,
2065 "INTx request IRQ failed - err %d\n", status);
2066 return status;
2067 }
2068done:
2069 adapter->isr_registered = true;
2070 return 0;
2071}
2072
2073static void be_irq_unregister(struct be_adapter *adapter)
2074{
2075 struct net_device *netdev = adapter->netdev;
2076 struct be_rx_obj *rxo;
2077 int i;
2078
2079 if (!adapter->isr_registered)
2080 return;
2081
2082 /* INTx */
2083 if (!msix_enabled(adapter)) {
2084 free_irq(netdev->irq, adapter);
2085 goto done;
2086 }
2087
2088 /* MSIx */
2089 be_free_irq(adapter, &adapter->tx_eq, adapter);
2090
2091 for_all_rx_queues(adapter, rxo, i)
2092 be_free_irq(adapter, &rxo->rx_eq, rxo);
2093
2094done:
2095 adapter->isr_registered = false;
2096}
2097
2098static int be_close(struct net_device *netdev)
2099{
2100 struct be_adapter *adapter = netdev_priv(netdev);
2101 struct be_rx_obj *rxo;
2102 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2103 int vec, i;
2104
2105 be_async_mcc_disable(adapter);
2106
2107 netif_carrier_off(netdev);
2108 adapter->link_up = false;
2109
2110 if (!lancer_chip(adapter))
2111 be_intr_set(adapter, false);
2112
2113 for_all_rx_queues(adapter, rxo, i)
2114 napi_disable(&rxo->rx_eq.napi);
2115
2116 napi_disable(&tx_eq->napi);
2117
2118 if (lancer_chip(adapter)) {
2119 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2120 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2121 for_all_rx_queues(adapter, rxo, i)
2122 be_cq_notify(adapter, rxo->cq.id, false, 0);
2123 }
2124
2125 if (msix_enabled(adapter)) {
2126 vec = be_msix_vec_get(adapter, tx_eq);
2127 synchronize_irq(vec);
2128
2129 for_all_rx_queues(adapter, rxo, i) {
2130 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2131 synchronize_irq(vec);
2132 }
2133 } else {
2134 synchronize_irq(netdev->irq);
2135 }
2136 be_irq_unregister(adapter);
2137
2138 /* Wait for all pending tx completions to arrive so that
2139 * all tx skbs are freed.
2140 */
2141 be_tx_compl_clean(adapter);
2142
2143 return 0;
2144}
2145
2146static int be_open(struct net_device *netdev)
2147{
2148 struct be_adapter *adapter = netdev_priv(netdev);
2149 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2150 struct be_rx_obj *rxo;
2151 bool link_up;
2152 int status, i;
2153 u8 mac_speed;
2154 u16 link_speed;
2155
2156 for_all_rx_queues(adapter, rxo, i) {
2157 be_post_rx_frags(rxo, GFP_KERNEL);
2158 napi_enable(&rxo->rx_eq.napi);
2159 }
2160 napi_enable(&tx_eq->napi);
2161
2162 be_irq_register(adapter);
2163
2164 if (!lancer_chip(adapter))
2165 be_intr_set(adapter, true);
2166
2167 /* The evt queues are created in unarmed state; arm them */
2168 for_all_rx_queues(adapter, rxo, i) {
2169 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2170 be_cq_notify(adapter, rxo->cq.id, true, 0);
2171 }
2172 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2173
2174 /* Now that interrupts are on we can process async mcc */
2175 be_async_mcc_enable(adapter);
2176
2177 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2178 &link_speed, 0);
2179 if (status)
2180 goto err;
2181 be_link_status_update(adapter, link_up);
2182
2183 if (be_physfn(adapter)) {
2184 status = be_vid_config(adapter, false, 0);
2185 if (status)
2186 goto err;
2187
2188 status = be_cmd_set_flow_control(adapter,
2189 adapter->tx_fc, adapter->rx_fc);
2190 if (status)
2191 goto err;
2192 }
2193
2194 return 0;
2195err:
2196 be_close(adapter->netdev);
2197 return -EIO;
2198}
2199
2200static int be_setup_wol(struct be_adapter *adapter, bool enable)
2201{
2202 struct be_dma_mem cmd;
2203 int status = 0;
2204 u8 mac[ETH_ALEN];
2205
2206 memset(mac, 0, ETH_ALEN);
2207
2208 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2209 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2210 GFP_KERNEL);
2211 if (cmd.va == NULL)
2212 return -1;
2213 memset(cmd.va, 0, cmd.size);
2214
2215 if (enable) {
2216 status = pci_write_config_dword(adapter->pdev,
2217 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2218 if (status) {
2219 dev_err(&adapter->pdev->dev,
2220 "Could not enable Wake-on-LAN\n");
2221 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2222 cmd.dma);
2223 return status;
2224 }
2225 status = be_cmd_enable_magic_wol(adapter,
2226 adapter->netdev->dev_addr, &cmd);
2227 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2228 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2229 } else {
2230 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2231 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2232 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2233 }
2234
2235 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2236 return status;
2237}
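/*
 * Wake-on-LAN is normally requested from user space before suspend, e.g.
 * (assuming the interface is named eth0 and the port supports WoL):
 *
 *	ethtool -s eth0 wol g
 *
 * adapter->wol is then checked in be_suspend() and be_shutdown() below,
 * which call be_setup_wol(adapter, true) to arm the magic-packet filter.
 */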
2238
2239/*
2240 * Generate a seed MAC address from the PF MAC address using jhash.
2241 * MAC addresses for the VFs are assigned incrementally, starting from the seed.
2242 * These addresses are programmed into the ASIC by the PF; each VF driver
2243 * queries for its own MAC address during probe.
2244 */
2245static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2246{
2247 u32 vf = 0;
2248 int status = 0;
2249 u8 mac[ETH_ALEN];
2250
2251 be_vf_eth_addr_generate(adapter, mac);
2252
2253 for (vf = 0; vf < num_vfs; vf++) {
2254 status = be_cmd_pmac_add(adapter, mac,
2255 adapter->vf_cfg[vf].vf_if_handle,
2256 &adapter->vf_cfg[vf].vf_pmac_id,
2257 vf + 1);
2258 if (status)
2259 dev_err(&adapter->pdev->dev,
2260 "MAC address add failed for VF %d\n", vf);
2261 else
2262 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2263
2264 mac[5] += 1;
2265 }
2266 return status;
2267}
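/*
 * Illustration of the derivation above: if the jhash-generated seed MAC is
 * 02:xx:xx:xx:xx:00, VF0 is programmed with ...:00, VF1 with ...:01 and so
 * on; only the last octet is incremented, which assumes fewer than 256 VFs.
 */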
2268
2269static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2270{
2271 u32 vf;
2272
2273 for (vf = 0; vf < num_vfs; vf++) {
2274 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2275 be_cmd_pmac_del(adapter,
2276 adapter->vf_cfg[vf].vf_if_handle,
2277 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2278 }
2279}
2280
2281static int be_setup(struct be_adapter *adapter)
2282{
2283 struct net_device *netdev = adapter->netdev;
2284 u32 cap_flags, en_flags, vf = 0;
2285 int status;
2286 u8 mac[ETH_ALEN];
2287
2288 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2289 BE_IF_FLAGS_BROADCAST |
2290 BE_IF_FLAGS_MULTICAST;
2291
2292 if (be_physfn(adapter)) {
2293 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2294 BE_IF_FLAGS_PROMISCUOUS |
2295 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2296 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2297
2298 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2299 cap_flags |= BE_IF_FLAGS_RSS;
2300 en_flags |= BE_IF_FLAGS_RSS;
2301 }
2302 }
2303
2304 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2305 netdev->dev_addr, false/* pmac_invalid */,
2306 &adapter->if_handle, &adapter->pmac_id, 0);
2307 if (status != 0)
2308 goto do_none;
2309
2310 if (be_physfn(adapter)) {
2311 if (adapter->sriov_enabled) {
2312 while (vf < num_vfs) {
2313 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2314 BE_IF_FLAGS_BROADCAST;
2315 status = be_cmd_if_create(adapter, cap_flags,
2316 en_flags, mac, true,
2317 &adapter->vf_cfg[vf].vf_if_handle,
2318 NULL, vf+1);
2319 if (status) {
2320 dev_err(&adapter->pdev->dev,
2321 "Interface Create failed for VF %d\n",
2322 vf);
2323 goto if_destroy;
2324 }
2325 adapter->vf_cfg[vf].vf_pmac_id =
2326 BE_INVALID_PMAC_ID;
2327 vf++;
2328 }
2329 }
2330 } else {
2331 status = be_cmd_mac_addr_query(adapter, mac,
2332 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2333 if (!status) {
2334 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2335 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2336 }
2337 }
2338
2339 status = be_tx_queues_create(adapter);
2340 if (status != 0)
2341 goto if_destroy;
2342
2343 status = be_rx_queues_create(adapter);
2344 if (status != 0)
2345 goto tx_qs_destroy;
2346
2347 status = be_mcc_queues_create(adapter);
2348 if (status != 0)
2349 goto rx_qs_destroy;
2350
2351 adapter->link_speed = -1;
2352
2353 return 0;
2354
2355rx_qs_destroy:
2356 be_rx_queues_destroy(adapter);
2357tx_qs_destroy:
2358 be_tx_queues_destroy(adapter);
2359if_destroy:
2360 if (be_physfn(adapter) && adapter->sriov_enabled)
2361 for (vf = 0; vf < num_vfs; vf++)
2362 if (adapter->vf_cfg[vf].vf_if_handle)
2363 be_cmd_if_destroy(adapter,
2364 adapter->vf_cfg[vf].vf_if_handle,
2365 vf + 1);
2366 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2367do_none:
2368 return status;
2369}
2370
2371static int be_clear(struct be_adapter *adapter)
2372{
2373 int vf;
2374
2375 if (be_physfn(adapter) && adapter->sriov_enabled)
2376 be_vf_eth_addr_rem(adapter);
2377
2378 be_mcc_queues_destroy(adapter);
2379 be_rx_queues_destroy(adapter);
2380 be_tx_queues_destroy(adapter);
2381 adapter->eq_next_idx = 0;
2382
2383 if (be_physfn(adapter) && adapter->sriov_enabled)
2384 for (vf = 0; vf < num_vfs; vf++)
2385 if (adapter->vf_cfg[vf].vf_if_handle)
2386 be_cmd_if_destroy(adapter,
2387 adapter->vf_cfg[vf].vf_if_handle,
2388 vf + 1);
2389
2390 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2391
2392 /* tell fw we're done with firing cmds */
2393 be_cmd_fw_clean(adapter);
2394 return 0;
2395}
2396
2397
2398#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2399static bool be_flash_redboot(struct be_adapter *adapter,
2400 const u8 *p, u32 img_start, int image_size,
2401 int hdr_size)
2402{
2403 u32 crc_offset;
2404 u8 flashed_crc[4];
2405 int status;
2406
2407 crc_offset = hdr_size + img_start + image_size - 4;
2408
2409 p += crc_offset;
2410
2411 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2412 (image_size - 4));
2413 if (status) {
2414 dev_err(&adapter->pdev->dev,
2415 "could not get crc from flash, not flashing redboot\n");
2416 return false;
2417 }
2418
2419 /* update redboot only if crc does not match */
2420 if (!memcmp(flashed_crc, p, 4))
2421 return false;
2422 else
2423 return true;
2424}
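/*
 * The check above compares the last four bytes of the redboot component in
 * the UFI file (fw->data + hdr_size + img_start + image_size - 4) against
 * the CRC read back from flash; the section is rewritten only if they
 * differ.
 */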
2425
2426static int be_flash_data(struct be_adapter *adapter,
2427 const struct firmware *fw,
2428 struct be_dma_mem *flash_cmd, int num_of_images)
2429
2430{
2431 int status = 0, i, filehdr_size = 0;
2432 u32 total_bytes = 0, flash_op;
2433 int num_bytes;
2434 const u8 *p = fw->data;
2435 struct be_cmd_write_flashrom *req = flash_cmd->va;
2436 const struct flash_comp *pflashcomp;
2437 int num_comp;
2438
2439 static const struct flash_comp gen3_flash_types[9] = {
2440 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2441 FLASH_IMAGE_MAX_SIZE_g3},
2442 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2443 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2444 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2445 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2446 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2447 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2448 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2449 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2450 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2451 FLASH_IMAGE_MAX_SIZE_g3},
2452 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2453 FLASH_IMAGE_MAX_SIZE_g3},
2454 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2455 FLASH_IMAGE_MAX_SIZE_g3},
2456 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2457 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2458 };
2459 static const struct flash_comp gen2_flash_types[8] = {
2460 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2461 FLASH_IMAGE_MAX_SIZE_g2},
2462 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2463 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2464 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2465 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2466 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2467 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2468 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2469 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2470 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2471 FLASH_IMAGE_MAX_SIZE_g2},
2472 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2473 FLASH_IMAGE_MAX_SIZE_g2},
2474 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2475 FLASH_IMAGE_MAX_SIZE_g2}
2476 };
2477
2478 if (adapter->generation == BE_GEN3) {
2479 pflashcomp = gen3_flash_types;
2480 filehdr_size = sizeof(struct flash_file_hdr_g3);
2481 num_comp = ARRAY_SIZE(gen3_flash_types);
2482 } else {
2483 pflashcomp = gen2_flash_types;
2484 filehdr_size = sizeof(struct flash_file_hdr_g2);
2485 num_comp = ARRAY_SIZE(gen2_flash_types);
2486 }
2487 for (i = 0; i < num_comp; i++) {
2488 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2489 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2490 continue;
2491 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2492 (!be_flash_redboot(adapter, fw->data,
2493 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2494 (num_of_images * sizeof(struct image_hdr)))))
2495 continue;
2496 p = fw->data;
2497 p += filehdr_size + pflashcomp[i].offset
2498 + (num_of_images * sizeof(struct image_hdr));
2499 if (p + pflashcomp[i].size > fw->data + fw->size)
2500 return -1;
2501 total_bytes = pflashcomp[i].size;
2502 while (total_bytes) {
2503 if (total_bytes > 32*1024)
2504 num_bytes = 32*1024;
2505 else
2506 num_bytes = total_bytes;
2507 total_bytes -= num_bytes;
2508
2509 if (!total_bytes)
2510 flash_op = FLASHROM_OPER_FLASH;
2511 else
2512 flash_op = FLASHROM_OPER_SAVE;
2513 memcpy(req->params.data_buf, p, num_bytes);
2514 p += num_bytes;
2515 status = be_cmd_write_flashrom(adapter, flash_cmd,
2516 pflashcomp[i].optype, flash_op, num_bytes);
2517 if (status) {
2518 dev_err(&adapter->pdev->dev,
2519 "cmd to write to flash rom failed.\n");
2520 return -1;
2521 }
2522 yield();
2523 }
2524 }
2525 return 0;
2526}
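/*
 * Worked example of the chunking above: a 1 MB (1,048,576 byte) component
 * is sent as 32 writes of 32 KB each; the first 31 use FLASHROM_OPER_SAVE
 * and only the final chunk is issued with FLASHROM_OPER_FLASH.
 */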
2527
2528static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2529{
2530 if (fhdr == NULL)
2531 return 0;
2532 if (fhdr->build[0] == '3')
2533 return BE_GEN3;
2534 else if (fhdr->build[0] == '2')
2535 return BE_GEN2;
2536 else
2537 return 0;
2538}
2539
2540int be_load_fw(struct be_adapter *adapter, u8 *func)
2541{
2542 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2543 const struct firmware *fw;
2544 struct flash_file_hdr_g2 *fhdr;
2545 struct flash_file_hdr_g3 *fhdr3;
2546 struct image_hdr *img_hdr_ptr = NULL;
2547 struct be_dma_mem flash_cmd;
2548 int status, i = 0, num_imgs = 0;
2549 const u8 *p;
2550
2551 if (!netif_running(adapter->netdev)) {
2552 dev_err(&adapter->pdev->dev,
2553 "Firmware load not allowed (interface is down)\n");
2554 return -EPERM;
2555 }
2556
2557 strcpy(fw_file, func);
2558
2559 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2560 if (status)
2561 goto fw_exit;
2562
2563 p = fw->data;
2564 fhdr = (struct flash_file_hdr_g2 *) p;
2565 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2566
2567 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2568 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2569 &flash_cmd.dma, GFP_KERNEL);
2570 if (!flash_cmd.va) {
2571 status = -ENOMEM;
2572 dev_err(&adapter->pdev->dev,
2573 "Memory allocation failure while flashing\n");
2574 goto fw_exit;
2575 }
2576
2577 if ((adapter->generation == BE_GEN3) &&
2578 (get_ufigen_type(fhdr) == BE_GEN3)) {
2579 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2580 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2581 for (i = 0; i < num_imgs; i++) {
2582 img_hdr_ptr = (struct image_hdr *) (fw->data +
2583 (sizeof(struct flash_file_hdr_g3) +
2584 i * sizeof(struct image_hdr)));
2585 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2586 status = be_flash_data(adapter, fw, &flash_cmd,
2587 num_imgs);
2588 }
2589 } else if ((adapter->generation == BE_GEN2) &&
2590 (get_ufigen_type(fhdr) == BE_GEN2)) {
2591 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2592 } else {
2593 dev_err(&adapter->pdev->dev,
2594 "UFI file and adapter generation are not compatible for flashing\n");
2595 status = -1;
2596 }
2597
2598 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2599 flash_cmd.dma);
2600 if (status) {
2601 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2602 goto fw_exit;
2603 }
2604
2605 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2606
2607fw_exit:
2608 release_firmware(fw);
2609 return status;
2610}
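/*
 * Firmware flashing is normally driven from user space while the interface
 * is up, e.g. (hypothetical interface and image names):
 *
 *	ethtool -f eth0 be_fw.ufi
 *
 * which reaches this function through the driver's ethtool flash hook with
 * "be_fw.ufi" passed in as 'func'.
 */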
2611
2612static struct net_device_ops be_netdev_ops = {
2613 .ndo_open = be_open,
2614 .ndo_stop = be_close,
2615 .ndo_start_xmit = be_xmit,
2616 .ndo_set_rx_mode = be_set_multicast_list,
2617 .ndo_set_mac_address = be_mac_addr_set,
2618 .ndo_change_mtu = be_change_mtu,
2619 .ndo_validate_addr = eth_validate_addr,
2620 .ndo_vlan_rx_register = be_vlan_register,
2621 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2622 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2623 .ndo_set_vf_mac = be_set_vf_mac,
2624 .ndo_set_vf_vlan = be_set_vf_vlan,
2625 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2626 .ndo_get_vf_config = be_get_vf_config
2627};
2628
2629static void be_netdev_init(struct net_device *netdev)
2630{
2631 struct be_adapter *adapter = netdev_priv(netdev);
2632 struct be_rx_obj *rxo;
2633 int i;
2634
2635 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2636 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2637 NETIF_F_HW_VLAN_TX;
2638 if (be_multi_rxq(adapter))
2639 netdev->hw_features |= NETIF_F_RXHASH;
2640
2641 netdev->features |= netdev->hw_features |
2642 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2643
2644 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2645 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2646
2647 if (lancer_chip(adapter))
2648 netdev->vlan_features |= NETIF_F_TSO6;
2649
2650 netdev->flags |= IFF_MULTICAST;
2651
2652 /* Default settings for Rx and Tx flow control */
2653 adapter->rx_fc = true;
2654 adapter->tx_fc = true;
2655
2656 netif_set_gso_max_size(netdev, 65535);
2657
2658 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2659
2660 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2661
2662 for_all_rx_queues(adapter, rxo, i)
2663 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2664 BE_NAPI_WEIGHT);
2665
2666 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2667 BE_NAPI_WEIGHT);
2668}
2669
2670static void be_unmap_pci_bars(struct be_adapter *adapter)
2671{
2672 if (adapter->csr)
2673 iounmap(adapter->csr);
2674 if (adapter->db)
2675 iounmap(adapter->db);
2676 if (adapter->pcicfg && be_physfn(adapter))
2677 iounmap(adapter->pcicfg);
2678}
2679
2680static int be_map_pci_bars(struct be_adapter *adapter)
2681{
2682 u8 __iomem *addr;
2683 int pcicfg_reg, db_reg;
2684
2685 if (lancer_chip(adapter)) {
2686 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2687 pci_resource_len(adapter->pdev, 0));
2688 if (addr == NULL)
2689 return -ENOMEM;
2690 adapter->db = addr;
2691 return 0;
2692 }
2693
2694 if (be_physfn(adapter)) {
2695 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2696 pci_resource_len(adapter->pdev, 2));
2697 if (addr == NULL)
2698 return -ENOMEM;
2699 adapter->csr = addr;
2700 }
2701
2702 if (adapter->generation == BE_GEN2) {
2703 pcicfg_reg = 1;
2704 db_reg = 4;
2705 } else {
2706 pcicfg_reg = 0;
2707 if (be_physfn(adapter))
2708 db_reg = 4;
2709 else
2710 db_reg = 0;
2711 }
2712 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2713 pci_resource_len(adapter->pdev, db_reg));
2714 if (addr == NULL)
2715 goto pci_map_err;
2716 adapter->db = addr;
2717
2718 if (be_physfn(adapter)) {
2719 addr = ioremap_nocache(
2720 pci_resource_start(adapter->pdev, pcicfg_reg),
2721 pci_resource_len(adapter->pdev, pcicfg_reg));
2722 if (addr == NULL)
2723 goto pci_map_err;
2724 adapter->pcicfg = addr;
2725 } else
2726 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2727
2728 return 0;
2729pci_map_err:
2730 be_unmap_pci_bars(adapter);
2731 return -ENOMEM;
2732}
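/*
 * Summary of the mapping above: Lancer exposes only the doorbell region
 * (BAR 0). Otherwise the PF maps CSR space from BAR 2, the doorbells come
 * from BAR 4 (BAR 0 for a GEN3 VF), and the pcicfg shadow is BAR 1 on GEN2
 * and BAR 0 on GEN3; a VF has no pcicfg BAR of its own and derives it from
 * the doorbell mapping at SRIOV_VF_PCICFG_OFFSET.
 */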
2733
2734
2735static void be_ctrl_cleanup(struct be_adapter *adapter)
2736{
2737 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2738
2739 be_unmap_pci_bars(adapter);
2740
2741 if (mem->va)
2742 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2743 mem->dma);
2744
2745 mem = &adapter->mc_cmd_mem;
2746 if (mem->va)
2747 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2748 mem->dma);
2749}
2750
2751static int be_ctrl_init(struct be_adapter *adapter)
2752{
2753 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2754 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2755 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2756 int status;
2757
2758 status = be_map_pci_bars(adapter);
2759 if (status)
2760 goto done;
2761
2762 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2763 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2764 mbox_mem_alloc->size,
2765 &mbox_mem_alloc->dma,
2766 GFP_KERNEL);
2767 if (!mbox_mem_alloc->va) {
2768 status = -ENOMEM;
2769 goto unmap_pci_bars;
2770 }
2771
2772 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2773 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2774 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2775 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2776
2777 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2778 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2779 mc_cmd_mem->size, &mc_cmd_mem->dma,
2780 GFP_KERNEL);
2781 if (mc_cmd_mem->va == NULL) {
2782 status = -ENOMEM;
2783 goto free_mbox;
2784 }
2785 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2786
2787 mutex_init(&adapter->mbox_lock);
2788 spin_lock_init(&adapter->mcc_lock);
2789 spin_lock_init(&adapter->mcc_cq_lock);
2790
2791 init_completion(&adapter->flash_compl);
2792 pci_save_state(adapter->pdev);
2793 return 0;
2794
2795free_mbox:
2796 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2797 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2798
2799unmap_pci_bars:
2800 be_unmap_pci_bars(adapter);
2801
2802done:
2803 return status;
2804}
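/*
 * The mailbox setup above over-allocates by 16 bytes and then rounds both
 * the CPU and DMA addresses up with PTR_ALIGN(); e.g. a DMA address of
 * 0x...1004 becomes 0x...1010, so the mailbox is always 16-byte aligned
 * irrespective of what dma_alloc_coherent() returned.
 */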
2805
2806static void be_stats_cleanup(struct be_adapter *adapter)
2807{
2808 struct be_dma_mem *cmd = &adapter->stats_cmd;
2809
2810 if (cmd->va)
2811 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2812 cmd->va, cmd->dma);
2813}
2814
2815static int be_stats_init(struct be_adapter *adapter)
2816{
2817 struct be_dma_mem *cmd = &adapter->stats_cmd;
2818
2819 cmd->size = sizeof(struct be_cmd_req_get_stats);
2820 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2821 GFP_KERNEL);
2822 if (cmd->va == NULL)
2823 return -1;
2824 memset(cmd->va, 0, cmd->size);
2825 return 0;
2826}
2827
2828static void __devexit be_remove(struct pci_dev *pdev)
2829{
2830 struct be_adapter *adapter = pci_get_drvdata(pdev);
2831
2832 if (!adapter)
2833 return;
2834
2835 cancel_delayed_work_sync(&adapter->work);
2836
2837 unregister_netdev(adapter->netdev);
2838
2839 be_clear(adapter);
2840
2841 be_stats_cleanup(adapter);
2842
2843 be_ctrl_cleanup(adapter);
2844
2845 kfree(adapter->vf_cfg);
2846 be_sriov_disable(adapter);
2847
2848 be_msix_disable(adapter);
2849
2850 pci_set_drvdata(pdev, NULL);
2851 pci_release_regions(pdev);
2852 pci_disable_device(pdev);
2853
2854 free_netdev(adapter->netdev);
2855}
2856
2857static int be_get_config(struct be_adapter *adapter)
2858{
2859 int status;
2860 u8 mac[ETH_ALEN];
2861
2862 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2863 if (status)
2864 return status;
2865
2866 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2867 &adapter->function_mode, &adapter->function_caps);
2868 if (status)
2869 return status;
2870
2871 memset(mac, 0, ETH_ALEN);
2872
2873 if (be_physfn(adapter)) {
2874 status = be_cmd_mac_addr_query(adapter, mac,
2875 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2876
2877 if (status)
2878 return status;
2879
2880 if (!is_valid_ether_addr(mac))
2881 return -EADDRNOTAVAIL;
2882
2883 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2884 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2885 }
2886
2887 if (adapter->function_mode & 0x400)
2888 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2889 else
2890 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2891
2892 status = be_cmd_get_cntl_attributes(adapter);
2893 if (status)
2894 return status;
2895
2896 be_cmd_check_native_mode(adapter);
2897 return 0;
2898}
2899
2900static int be_dev_family_check(struct be_adapter *adapter)
2901{
2902 struct pci_dev *pdev = adapter->pdev;
2903 u32 sli_intf = 0, if_type;
2904
2905 switch (pdev->device) {
2906 case BE_DEVICE_ID1:
2907 case OC_DEVICE_ID1:
2908 adapter->generation = BE_GEN2;
2909 break;
2910 case BE_DEVICE_ID2:
2911 case OC_DEVICE_ID2:
2912 adapter->generation = BE_GEN3;
2913 break;
2914 case OC_DEVICE_ID3:
2915 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2916 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2917 SLI_INTF_IF_TYPE_SHIFT;
2918
2919 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2920 if_type != 0x02) {
2921 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2922 return -EINVAL;
2923 }
2924 if (num_vfs > 0) {
2925 dev_err(&pdev->dev, "VFs not supported\n");
2926 return -EINVAL;
2927 }
2928 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2929 SLI_INTF_FAMILY_SHIFT);
2930 adapter->generation = BE_GEN3;
2931 break;
2932 default:
2933 adapter->generation = 0;
2934 }
2935 return 0;
2936}
2937
2938static int lancer_wait_ready(struct be_adapter *adapter)
2939{
2940#define SLIPORT_READY_TIMEOUT 500
2941 u32 sliport_status;
2942 int status = 0, i;
2943
2944 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2945 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2946 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2947 break;
2948
2949 msleep(20);
2950 }
2951
2952 if (i == SLIPORT_READY_TIMEOUT)
2953 status = -1;
2954
2955 return status;
2956}
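/*
 * Worst case, the loop above polls SLIPORT_STATUS 500 times with a 20 ms
 * sleep between reads, i.e. the Lancer firmware gets roughly 10 seconds to
 * report ready before -1 is returned.
 */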
2957
2958static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2959{
2960 int status;
2961 u32 sliport_status, err, reset_needed;
2962 status = lancer_wait_ready(adapter);
2963 if (!status) {
2964 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2965 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2966 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2967 if (err && reset_needed) {
2968 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2969 adapter->db + SLIPORT_CONTROL_OFFSET);
2970
2971 /* check adapter has corrected the error */
2972 status = lancer_wait_ready(adapter);
2973 sliport_status = ioread32(adapter->db +
2974 SLIPORT_STATUS_OFFSET);
2975 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2976 SLIPORT_STATUS_RN_MASK);
2977 if (status || sliport_status)
2978 status = -1;
2979 } else if (err || reset_needed) {
2980 status = -1;
2981 }
2982 }
2983 return status;
2984}
2985
2986static int __devinit be_probe(struct pci_dev *pdev,
2987 const struct pci_device_id *pdev_id)
2988{
2989 int status = 0;
2990 struct be_adapter *adapter;
2991 struct net_device *netdev;
2992
2993 status = pci_enable_device(pdev);
2994 if (status)
2995 goto do_none;
2996
2997 status = pci_request_regions(pdev, DRV_NAME);
2998 if (status)
2999 goto disable_dev;
3000 pci_set_master(pdev);
3001
3002 netdev = alloc_etherdev(sizeof(struct be_adapter));
3003 if (netdev == NULL) {
3004 status = -ENOMEM;
3005 goto rel_reg;
3006 }
3007 adapter = netdev_priv(netdev);
3008 adapter->pdev = pdev;
3009 pci_set_drvdata(pdev, adapter);
3010
3011 status = be_dev_family_check(adapter);
3012 if (status)
3013 goto free_netdev;
3014
3015 adapter->netdev = netdev;
3016 SET_NETDEV_DEV(netdev, &pdev->dev);
3017
3018 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3019 if (!status) {
3020 netdev->features |= NETIF_F_HIGHDMA;
3021 } else {
3022 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3023 if (status) {
3024 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3025 goto free_netdev;
3026 }
3027 }
3028
3029 be_sriov_enable(adapter);
3030 if (adapter->sriov_enabled) {
3031 adapter->vf_cfg = kcalloc(num_vfs,
3032 sizeof(struct be_vf_cfg), GFP_KERNEL);
3033
3034 if (!adapter->vf_cfg)
3035 goto free_netdev;
3036 }
3037
3038 status = be_ctrl_init(adapter);
3039 if (status)
3040 goto free_vf_cfg;
3041
3042 if (lancer_chip(adapter)) {
3043 status = lancer_test_and_set_rdy_state(adapter);
3044 if (status) {
3045 dev_err(&pdev->dev, "Adapter is in a non-recoverable error state\n");
3046 goto ctrl_clean;
3047 }
3048 }
3049
3050 /* sync up with fw's ready state */
3051 if (be_physfn(adapter)) {
3052 status = be_cmd_POST(adapter);
3053 if (status)
3054 goto ctrl_clean;
3055 }
3056
3057 /* tell fw we're ready to fire cmds */
3058 status = be_cmd_fw_init(adapter);
3059 if (status)
3060 goto ctrl_clean;
3061
3062 status = be_cmd_reset_function(adapter);
3063 if (status)
3064 goto ctrl_clean;
3065
3066 status = be_stats_init(adapter);
3067 if (status)
3068 goto ctrl_clean;
3069
3070 status = be_get_config(adapter);
3071 if (status)
3072 goto stats_clean;
3073
3074 be_msix_enable(adapter);
3075
3076 INIT_DELAYED_WORK(&adapter->work, be_worker);
3077
3078 status = be_setup(adapter);
3079 if (status)
3080 goto msix_disable;
3081
3082 be_netdev_init(netdev);
3083 status = register_netdev(netdev);
3084 if (status != 0)
3085 goto unsetup;
3086 netif_carrier_off(netdev);
3087
3088 if (be_physfn(adapter) && adapter->sriov_enabled) {
3089 u8 mac_speed;
3090 bool link_up;
3091 u16 vf, lnk_speed;
3092
3093 status = be_vf_eth_addr_config(adapter);
3094 if (status)
3095 goto unreg_netdev;
3096
3097 for (vf = 0; vf < num_vfs; vf++) {
3098 status = be_cmd_link_status_query(adapter, &link_up,
3099 &mac_speed, &lnk_speed, vf + 1);
3100 if (!status)
3101 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3102 else
3103 goto unreg_netdev;
3104 }
3105 }
3106
3107 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3108 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3109 return 0;
3110
3111unreg_netdev:
3112 unregister_netdev(netdev);
3113unsetup:
3114 be_clear(adapter);
3115msix_disable:
3116 be_msix_disable(adapter);
3117stats_clean:
3118 be_stats_cleanup(adapter);
3119ctrl_clean:
3120 be_ctrl_cleanup(adapter);
3121free_vf_cfg:
3122 kfree(adapter->vf_cfg);
3123free_netdev:
3124 be_sriov_disable(adapter);
3125 free_netdev(netdev);
3126 pci_set_drvdata(pdev, NULL);
3127rel_reg:
3128 pci_release_regions(pdev);
3129disable_dev:
3130 pci_disable_device(pdev);
3131do_none:
3132 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3133 return status;
3134}
3135
3136static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3137{
3138 struct be_adapter *adapter = pci_get_drvdata(pdev);
3139 struct net_device *netdev = adapter->netdev;
3140
3141 cancel_delayed_work_sync(&adapter->work);
3142 if (adapter->wol)
3143 be_setup_wol(adapter, true);
3144
3145 netif_device_detach(netdev);
3146 if (netif_running(netdev)) {
3147 rtnl_lock();
3148 be_close(netdev);
3149 rtnl_unlock();
3150 }
3151 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3152 be_clear(adapter);
3153
3154 be_msix_disable(adapter);
3155 pci_save_state(pdev);
3156 pci_disable_device(pdev);
3157 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3158 return 0;
3159}
3160
3161static int be_resume(struct pci_dev *pdev)
3162{
3163 int status = 0;
3164 struct be_adapter *adapter = pci_get_drvdata(pdev);
3165 struct net_device *netdev = adapter->netdev;
3166
3167 netif_device_detach(netdev);
3168
3169 status = pci_enable_device(pdev);
3170 if (status)
3171 return status;
3172
3173 pci_set_power_state(pdev, 0);
3174 pci_restore_state(pdev);
3175
3176 be_msix_enable(adapter);
3177 /* tell fw we're ready to fire cmds */
3178 status = be_cmd_fw_init(adapter);
3179 if (status)
3180 return status;
3181
3182 be_setup(adapter);
3183 if (netif_running(netdev)) {
3184 rtnl_lock();
3185 be_open(netdev);
3186 rtnl_unlock();
3187 }
3188 netif_device_attach(netdev);
3189
3190 if (adapter->wol)
3191 be_setup_wol(adapter, false);
3192
3193 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3194 return 0;
3195}
3196
3197/*
3198 * A function-level reset (FLR) stops the adapter from DMAing any further data.
3199 */
3200static void be_shutdown(struct pci_dev *pdev)
3201{
3202 struct be_adapter *adapter = pci_get_drvdata(pdev);
3203
3204 if (!adapter)
3205 return;
3206
3207 cancel_delayed_work_sync(&adapter->work);
3208
3209 netif_device_detach(adapter->netdev);
3210
3211 if (adapter->wol)
3212 be_setup_wol(adapter, true);
3213
3214 be_cmd_reset_function(adapter);
3215
3216 pci_disable_device(pdev);
3217}
3218
3219static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3220 pci_channel_state_t state)
3221{
3222 struct be_adapter *adapter = pci_get_drvdata(pdev);
3223 struct net_device *netdev = adapter->netdev;
3224
3225 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3226
3227 adapter->eeh_err = true;
3228
3229 netif_device_detach(netdev);
3230
3231 if (netif_running(netdev)) {
3232 rtnl_lock();
3233 be_close(netdev);
3234 rtnl_unlock();
3235 }
3236 be_clear(adapter);
3237
3238 if (state == pci_channel_io_perm_failure)
3239 return PCI_ERS_RESULT_DISCONNECT;
3240
3241 pci_disable_device(pdev);
3242
3243 return PCI_ERS_RESULT_NEED_RESET;
3244}
3245
3246static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3247{
3248 struct be_adapter *adapter = pci_get_drvdata(pdev);
3249 int status;
3250
3251 dev_info(&adapter->pdev->dev, "EEH reset\n");
3252 adapter->eeh_err = false;
3253
3254 status = pci_enable_device(pdev);
3255 if (status)
3256 return PCI_ERS_RESULT_DISCONNECT;
3257
3258 pci_set_master(pdev);
3259 pci_set_power_state(pdev, 0);
3260 pci_restore_state(pdev);
3261
3262 /* Check if card is ok and fw is ready */
3263 status = be_cmd_POST(adapter);
3264 if (status)
3265 return PCI_ERS_RESULT_DISCONNECT;
3266
3267 return PCI_ERS_RESULT_RECOVERED;
3268}
3269
3270static void be_eeh_resume(struct pci_dev *pdev)
3271{
3272 int status = 0;
3273 struct be_adapter *adapter = pci_get_drvdata(pdev);
3274 struct net_device *netdev = adapter->netdev;
3275
3276 dev_info(&adapter->pdev->dev, "EEH resume\n");
3277
3278 pci_save_state(pdev);
3279
3280 /* tell fw we're ready to fire cmds */
3281 status = be_cmd_fw_init(adapter);
3282 if (status)
3283 goto err;
3284
3285 status = be_setup(adapter);
3286 if (status)
3287 goto err;
3288
3289 if (netif_running(netdev)) {
3290 status = be_open(netdev);
3291 if (status)
3292 goto err;
3293 }
3294 netif_device_attach(netdev);
3295 return;
3296err:
3297 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3298}
3299
3300static struct pci_error_handlers be_eeh_handlers = {
3301 .error_detected = be_eeh_err_detected,
3302 .slot_reset = be_eeh_reset,
3303 .resume = be_eeh_resume,
3304};
3305
3306static struct pci_driver be_driver = {
3307 .name = DRV_NAME,
3308 .id_table = be_dev_ids,
3309 .probe = be_probe,
3310 .remove = be_remove,
3311 .suspend = be_suspend,
3312 .resume = be_resume,
3313 .shutdown = be_shutdown,
3314 .err_handler = &be_eeh_handlers
3315};
3316
3317static int __init be_init_module(void)
3318{
3319 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3320 rx_frag_size != 2048) {
3321 printk(KERN_WARNING DRV_NAME
3322 " : Module param rx_frag_size must be 2048/4096/8192."
3323 " Using 2048\n");
3324 rx_frag_size = 2048;
3325 }
3326
3327 return pci_register_driver(&be_driver);
3328}
3329module_init(be_init_module);
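/*
 * Typical load-time use of the parameters validated above (module name
 * assumed to be be2net, per DRV_NAME):
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * An out-of-range rx_frag_size is not rejected; it falls back to the
 * 2048-byte default with the warning printed above.
 */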
3330
3331static void __exit be_exit_module(void)
3332{
3333 pci_unregister_driver(&be_driver);
3334}
3335module_exit(be_exit_module);