drivers/net/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
23 MODULE_DEVICE_TABLE(pci, be_dev_ids);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static ushort rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, ushort, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { 0 }
46 };
47 MODULE_DEVICE_TABLE(pci, be_dev_ids);
48 /* UE Status Low CSR */
49 static char *ue_status_low_desc[] = {
50 "CEV",
51 "CTX",
52 "DBUF",
53 "ERX",
54 "Host",
55 "MPU",
56 "NDMA",
57 "PTC ",
58 "RDMA ",
59 "RXF ",
60 "RXIPS ",
61 "RXULP0 ",
62 "RXULP1 ",
63 "RXULP2 ",
64 "TIM ",
65 "TPOST ",
66 "TPRE ",
67 "TXIPS ",
68 "TXULP0 ",
69 "TXULP1 ",
70 "UC ",
71 "WDMA ",
72 "TXULP2 ",
73 "HOST1 ",
74 "P0_OB_LINK ",
75 "P1_OB_LINK ",
76 "HOST_GPIO ",
77 "MBOX ",
78 "AXGMAC0",
79 "AXGMAC1",
80 "JTAG",
81 "MPU_INTPEND"
82 };
83 /* UE Status High CSR */
84 static char *ue_status_hi_desc[] = {
85 "LPCMEMHOST",
86 "MGMT_MAC",
87 "PCS0ONLINE",
88 "MPU_IRAM",
89 "PCS1ONLINE",
90 "PCTL0",
91 "PCTL1",
92 "PMEM",
93 "RR",
94 "TXPB",
95 "RXPP",
96 "XAUI",
97 "TXP",
98 "ARM",
99 "IPC",
100 "HOST2",
101 "HOST3",
102 "HOST4",
103 "HOST5",
104 "HOST6",
105 "HOST7",
106 "HOST8",
107 "HOST9",
108 "NETC",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown"
117 };
118
119 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
120 {
121 struct be_dma_mem *mem = &q->dma_mem;
122 if (mem->va)
123 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
124 mem->dma);
125 }
126
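/* Allocate coherent DMA memory for a queue of 'len' entries of
 * 'entry_size' bytes each and record it in q->dma_mem
 */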
127 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
128 u16 len, u16 entry_size)
129 {
130 struct be_dma_mem *mem = &q->dma_mem;
131
132 memset(q, 0, sizeof(*q));
133 q->len = len;
134 q->entry_size = entry_size;
135 mem->size = len * entry_size;
136 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
137 GFP_KERNEL);
138 if (!mem->va)
139 return -1;
140 memset(mem->va, 0, mem->size);
141 return 0;
142 }
143
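/* Enable or disable host interrupts by toggling the HOSTINTR bit in the
 * membar control register; does nothing if the bit already matches
 * 'enable' or if an EEH error has been detected
 */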
144 static void be_intr_set(struct be_adapter *adapter, bool enable)
145 {
146 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
147 u32 reg = ioread32(addr);
148 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
149
150 if (adapter->eeh_err)
151 return;
152
153 if (!enabled && enable)
154 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 else if (enabled && !enable)
156 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
157 else
158 return;
159
160 iowrite32(reg, addr);
161 }
162
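/* Ring the RX queue doorbell with the number of buffers just posted */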
163 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
164 {
165 u32 val = 0;
166 val |= qid & DB_RQ_RING_ID_MASK;
167 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
168
169 wmb();
170 iowrite32(val, adapter->db + DB_RQ_OFFSET);
171 }
172
173 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
174 {
175 u32 val = 0;
176 val |= qid & DB_TXULP_RING_ID_MASK;
177 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
178
179 wmb();
180 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
181 }
182
183 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
184 bool arm, bool clear_int, u16 num_popped)
185 {
186 u32 val = 0;
187 val |= qid & DB_EQ_RING_ID_MASK;
188 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
189 DB_EQ_RING_ID_EXT_MASK_SHIFT);
190
191 if (adapter->eeh_err)
192 return;
193
194 if (arm)
195 val |= 1 << DB_EQ_REARM_SHIFT;
196 if (clear_int)
197 val |= 1 << DB_EQ_CLR_SHIFT;
198 val |= 1 << DB_EQ_EVNT_SHIFT;
199 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
200 iowrite32(val, adapter->db + DB_EQ_OFFSET);
201 }
202
203 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
204 {
205 u32 val = 0;
206 val |= qid & DB_CQ_RING_ID_MASK;
207 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
208 DB_CQ_RING_ID_EXT_MASK_SHIFT);
209
210 if (adapter->eeh_err)
211 return;
212
213 if (arm)
214 val |= 1 << DB_CQ_REARM_SHIFT;
215 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
216 iowrite32(val, adapter->db + DB_CQ_OFFSET);
217 }
218
219 static int be_mac_addr_set(struct net_device *netdev, void *p)
220 {
221 struct be_adapter *adapter = netdev_priv(netdev);
222 struct sockaddr *addr = p;
223 int status = 0;
224
225 if (!is_valid_ether_addr(addr->sa_data))
226 return -EADDRNOTAVAIL;
227
228 /* MAC addr configuration will be done in hardware for VFs
229 * by their corresponding PFs. Just copy to netdev addr here
230 */
231 if (!be_physfn(adapter))
232 goto netdev_addr;
233
234 status = be_cmd_pmac_del(adapter, adapter->if_handle,
235 adapter->pmac_id, 0);
236 if (status)
237 return status;
238
239 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
240 adapter->if_handle, &adapter->pmac_id, 0);
241 netdev_addr:
242 if (!status)
243 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
244
245 return status;
246 }
247
248 void netdev_stats_update(struct be_adapter *adapter)
249 {
250 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
251 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
252 struct be_port_rxf_stats *port_stats =
253 &rxf_stats->port[adapter->port_num];
254 struct net_device_stats *dev_stats = &adapter->netdev->stats;
255 struct be_erx_stats *erx_stats = &hw_stats->erx;
256 struct be_rx_obj *rxo;
257 int i;
258
259 memset(dev_stats, 0, sizeof(*dev_stats));
260 for_all_rx_queues(adapter, rxo, i) {
261 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
262 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
263 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
264 /* no space in linux buffers: best possible approximation */
265 dev_stats->rx_dropped +=
266 erx_stats->rx_drops_no_fragments[rxo->q.id];
267 }
268
269 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
270 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
271
272 /* bad pkts received */
273 dev_stats->rx_errors = port_stats->rx_crc_errors +
274 port_stats->rx_alignment_symbol_errors +
275 port_stats->rx_in_range_errors +
276 port_stats->rx_out_range_errors +
277 port_stats->rx_frame_too_long +
278 port_stats->rx_dropped_too_small +
279 port_stats->rx_dropped_too_short +
280 port_stats->rx_dropped_header_too_small +
281 port_stats->rx_dropped_tcp_length +
282 port_stats->rx_dropped_runt +
283 port_stats->rx_tcp_checksum_errs +
284 port_stats->rx_ip_checksum_errs +
285 port_stats->rx_udp_checksum_errs;
286
287 /* detailed rx errors */
288 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
289 port_stats->rx_out_range_errors +
290 port_stats->rx_frame_too_long;
291
292 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
293
294 /* frame alignment errors */
295 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
296
297 /* receiver fifo overrun */
298 /* drops_no_pbuf is not per i/f, it's per BE card */
299 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
300 port_stats->rx_input_fifo_overflow +
301 rxf_stats->rx_drops_no_pbuf;
302 }
303
304 void be_link_status_update(struct be_adapter *adapter, bool link_up)
305 {
306 struct net_device *netdev = adapter->netdev;
307
308 /* If link came up or went down */
309 if (adapter->link_up != link_up) {
310 adapter->link_speed = -1;
311 if (link_up) {
312 netif_carrier_on(netdev);
313 printk(KERN_INFO "%s: Link up\n", netdev->name);
314 } else {
315 netif_carrier_off(netdev);
316 printk(KERN_INFO "%s: Link down\n", netdev->name);
317 }
318 adapter->link_up = link_up;
319 }
320 }
321
322 /* Update the EQ delay in BE based on the RX frags consumed / sec */
323 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
324 {
325 struct be_eq_obj *rx_eq = &rxo->rx_eq;
326 struct be_rx_stats *stats = &rxo->stats;
327 ulong now = jiffies;
328 u32 eqd;
329
330 if (!rx_eq->enable_aic)
331 return;
332
333 /* Wrapped around */
334 if (time_before(now, stats->rx_fps_jiffies)) {
335 stats->rx_fps_jiffies = now;
336 return;
337 }
338
339 /* Update once a second */
340 if ((now - stats->rx_fps_jiffies) < HZ)
341 return;
342
343 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
344 ((now - stats->rx_fps_jiffies) / HZ);
345
346 stats->rx_fps_jiffies = now;
347 stats->prev_rx_frags = stats->rx_frags;
348 eqd = stats->rx_fps / 110000;
349 eqd = eqd << 3;
350 if (eqd > rx_eq->max_eqd)
351 eqd = rx_eq->max_eqd;
352 if (eqd < rx_eq->min_eqd)
353 eqd = rx_eq->min_eqd;
354 if (eqd < 10)
355 eqd = 0;
356 if (eqd != rx_eq->cur_eqd)
357 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
358
359 rx_eq->cur_eqd = eqd;
360 }
361
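/* Convert a byte count measured over 'ticks' jiffies into an
 * approximate rate in megabits/sec
 */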
362 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
363 {
364 u64 rate = bytes;
365
366 do_div(rate, ticks / HZ);
367 rate <<= 3; /* bytes/sec -> bits/sec */
368 do_div(rate, 1000000ul); /* MB/Sec */
369
370 return rate;
371 }
372
373 static void be_tx_rate_update(struct be_adapter *adapter)
374 {
375 struct be_tx_stats *stats = tx_stats(adapter);
376 ulong now = jiffies;
377
378 /* Wrapped around? */
379 if (time_before(now, stats->be_tx_jiffies)) {
380 stats->be_tx_jiffies = now;
381 return;
382 }
383
384 /* Update tx rate once in two seconds */
385 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
386 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
387 - stats->be_tx_bytes_prev,
388 now - stats->be_tx_jiffies);
389 stats->be_tx_jiffies = now;
390 stats->be_tx_bytes_prev = stats->be_tx_bytes;
391 }
392 }
393
394 static void be_tx_stats_update(struct be_adapter *adapter,
395 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
396 {
397 struct be_tx_stats *stats = tx_stats(adapter);
398 stats->be_tx_reqs++;
399 stats->be_tx_wrbs += wrb_cnt;
400 stats->be_tx_bytes += copied;
401 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
402 if (stopped)
403 stats->be_tx_stops++;
404 }
405
406 /* Determine number of WRB entries needed to xmit data in an skb */
407 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
408 bool *dummy)
409 {
410 int cnt = (skb->len > skb->data_len);
411
412 cnt += skb_shinfo(skb)->nr_frags;
413
414 /* to account for hdr wrb */
415 cnt++;
416 if (lancer_chip(adapter) || !(cnt & 1)) {
417 *dummy = false;
418 } else {
419 /* add a dummy to make it an even num */
420 cnt++;
421 *dummy = true;
422 }
423 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
424 return cnt;
425 }
426
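/* Fill one tx WRB with the DMA address and length of a single fragment */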
427 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
428 {
429 wrb->frag_pa_hi = upper_32_bits(addr);
430 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
431 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
432 }
433
434 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
435 struct sk_buff *skb, u32 wrb_cnt, u32 len)
436 {
437 u8 vlan_prio = 0;
438 u16 vlan_tag = 0;
439
440 memset(hdr, 0, sizeof(*hdr));
441
442 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
443
444 if (skb_is_gso(skb)) {
445 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
446 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
447 hdr, skb_shinfo(skb)->gso_size);
448 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
449 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
450 if (lancer_chip(adapter) && adapter->sli_family ==
451 LANCER_A0_SLI_FAMILY) {
452 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
453 if (is_tcp_pkt(skb))
454 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
455 tcpcs, hdr, 1);
456 else if (is_udp_pkt(skb))
457 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
458 udpcs, hdr, 1);
459 }
460 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
461 if (is_tcp_pkt(skb))
462 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
463 else if (is_udp_pkt(skb))
464 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
465 }
466
467 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
468 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
469 vlan_tag = vlan_tx_tag_get(skb);
470 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
471 /* If vlan priority provided by OS is NOT in available bmap */
472 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
473 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
474 adapter->recommended_prio;
475 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
476 }
477
478 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
479 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
480 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
481 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
482 }
483
484 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
485 bool unmap_single)
486 {
487 dma_addr_t dma;
488
489 be_dws_le_to_cpu(wrb, sizeof(*wrb));
490
491 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
492 if (wrb->frag_len) {
493 if (unmap_single)
494 dma_unmap_single(dev, dma, wrb->frag_len,
495 DMA_TO_DEVICE);
496 else
497 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
498 }
499 }
500
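/* DMA-map the skb header and frags and build the corresponding WRBs in
 * the tx queue; returns the number of bytes mapped, or 0 if a mapping
 * fails (in which case all mappings done so far are undone)
 */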
501 static int make_tx_wrbs(struct be_adapter *adapter,
502 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
503 {
504 dma_addr_t busaddr;
505 int i, copied = 0;
506 struct device *dev = &adapter->pdev->dev;
507 struct sk_buff *first_skb = skb;
508 struct be_queue_info *txq = &adapter->tx_obj.q;
509 struct be_eth_wrb *wrb;
510 struct be_eth_hdr_wrb *hdr;
511 bool map_single = false;
512 u16 map_head;
513
514 hdr = queue_head_node(txq);
515 queue_head_inc(txq);
516 map_head = txq->head;
517
518 if (skb->len > skb->data_len) {
519 int len = skb_headlen(skb);
520 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
521 if (dma_mapping_error(dev, busaddr))
522 goto dma_err;
523 map_single = true;
524 wrb = queue_head_node(txq);
525 wrb_fill(wrb, busaddr, len);
526 be_dws_cpu_to_le(wrb, sizeof(*wrb));
527 queue_head_inc(txq);
528 copied += len;
529 }
530
531 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
532 struct skb_frag_struct *frag =
533 &skb_shinfo(skb)->frags[i];
534 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
535 frag->size, DMA_TO_DEVICE);
536 if (dma_mapping_error(dev, busaddr))
537 goto dma_err;
538 wrb = queue_head_node(txq);
539 wrb_fill(wrb, busaddr, frag->size);
540 be_dws_cpu_to_le(wrb, sizeof(*wrb));
541 queue_head_inc(txq);
542 copied += frag->size;
543 }
544
545 if (dummy_wrb) {
546 wrb = queue_head_node(txq);
547 wrb_fill(wrb, 0, 0);
548 be_dws_cpu_to_le(wrb, sizeof(*wrb));
549 queue_head_inc(txq);
550 }
551
552 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
553 be_dws_cpu_to_le(hdr, sizeof(*hdr));
554
555 return copied;
556 dma_err:
557 txq->head = map_head;
558 while (copied) {
559 wrb = queue_head_node(txq);
560 unmap_tx_frag(dev, wrb, map_single);
561 map_single = false;
562 copied -= wrb->frag_len;
563 queue_head_inc(txq);
564 }
565 return 0;
566 }
567
568 static netdev_tx_t be_xmit(struct sk_buff *skb,
569 struct net_device *netdev)
570 {
571 struct be_adapter *adapter = netdev_priv(netdev);
572 struct be_tx_obj *tx_obj = &adapter->tx_obj;
573 struct be_queue_info *txq = &tx_obj->q;
574 u32 wrb_cnt = 0, copied = 0;
575 u32 start = txq->head;
576 bool dummy_wrb, stopped = false;
577
578 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
579
580 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
581 if (copied) {
582 /* record the sent skb in the sent_skb table */
583 BUG_ON(tx_obj->sent_skb_list[start]);
584 tx_obj->sent_skb_list[start] = skb;
585
586 /* Ensure txq has space for the next skb; Else stop the queue
587 * *BEFORE* ringing the tx doorbell, so that we serialize the
588 * tx compls of the current transmit which'll wake up the queue
589 */
590 atomic_add(wrb_cnt, &txq->used);
591 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
592 txq->len) {
593 netif_stop_queue(netdev);
594 stopped = true;
595 }
596
597 be_txq_notify(adapter, txq->id, wrb_cnt);
598
599 be_tx_stats_update(adapter, wrb_cnt, copied,
600 skb_shinfo(skb)->gso_segs, stopped);
601 } else {
602 txq->head = start;
603 dev_kfree_skb_any(skb);
604 }
605 return NETDEV_TX_OK;
606 }
607
608 static int be_change_mtu(struct net_device *netdev, int new_mtu)
609 {
610 struct be_adapter *adapter = netdev_priv(netdev);
611 if (new_mtu < BE_MIN_MTU ||
612 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
613 (ETH_HLEN + ETH_FCS_LEN))) {
614 dev_info(&adapter->pdev->dev,
615 "MTU must be between %d and %d bytes\n",
616 BE_MIN_MTU,
617 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
618 return -EINVAL;
619 }
620 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
621 netdev->mtu, new_mtu);
622 netdev->mtu = new_mtu;
623 return 0;
624 }
625
626 /*
627 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
628 * If the user configures more, place BE in vlan promiscuous mode.
629 */
630 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
631 {
632 u16 vtag[BE_NUM_VLANS_SUPPORTED];
633 u16 ntags = 0, i;
634 int status = 0;
635 u32 if_handle;
636
637 if (vf) {
638 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
639 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
640 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
641 }
642
643 if (adapter->vlans_added <= adapter->max_vlans) {
644 /* Construct VLAN Table to give to HW */
645 for (i = 0; i < VLAN_N_VID; i++) {
646 if (adapter->vlan_tag[i]) {
647 vtag[ntags] = cpu_to_le16(i);
648 ntags++;
649 }
650 }
651 status = be_cmd_vlan_config(adapter, adapter->if_handle,
652 vtag, ntags, 1, 0);
653 } else {
654 status = be_cmd_vlan_config(adapter, adapter->if_handle,
655 NULL, 0, 1, 1);
656 }
657
658 return status;
659 }
660
661 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
662 {
663 struct be_adapter *adapter = netdev_priv(netdev);
664
665 adapter->vlan_grp = grp;
666 }
667
668 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
669 {
670 struct be_adapter *adapter = netdev_priv(netdev);
671
672 adapter->vlans_added++;
673 if (!be_physfn(adapter))
674 return;
675
676 adapter->vlan_tag[vid] = 1;
677 if (adapter->vlans_added <= (adapter->max_vlans + 1))
678 be_vid_config(adapter, false, 0);
679 }
680
681 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
682 {
683 struct be_adapter *adapter = netdev_priv(netdev);
684
685 adapter->vlans_added--;
686 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
687
688 if (!be_physfn(adapter))
689 return;
690
691 adapter->vlan_tag[vid] = 0;
692 if (adapter->vlans_added <= adapter->max_vlans)
693 be_vid_config(adapter, false, 0);
694 }
695
696 static void be_set_multicast_list(struct net_device *netdev)
697 {
698 struct be_adapter *adapter = netdev_priv(netdev);
699
700 if (netdev->flags & IFF_PROMISC) {
701 be_cmd_promiscuous_config(adapter, true);
702 adapter->promiscuous = true;
703 goto done;
704 }
705
706 /* BE was previously in promiscuous mode; disable it */
707 if (adapter->promiscuous) {
708 adapter->promiscuous = false;
709 be_cmd_promiscuous_config(adapter, false);
710 }
711
712 /* Enable multicast promisc if num configured exceeds what we support */
713 if (netdev->flags & IFF_ALLMULTI ||
714 netdev_mc_count(netdev) > BE_MAX_MC) {
715 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
716 &adapter->mc_cmd_mem);
717 goto done;
718 }
719
720 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
721 &adapter->mc_cmd_mem);
722 done:
723 return;
724 }
725
726 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
727 {
728 struct be_adapter *adapter = netdev_priv(netdev);
729 int status;
730
731 if (!adapter->sriov_enabled)
732 return -EPERM;
733
734 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
735 return -EINVAL;
736
737 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
738 status = be_cmd_pmac_del(adapter,
739 adapter->vf_cfg[vf].vf_if_handle,
740 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
741
742 status = be_cmd_pmac_add(adapter, mac,
743 adapter->vf_cfg[vf].vf_if_handle,
744 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
745
746 if (status)
747 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
748 mac, vf);
749 else
750 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
751
752 return status;
753 }
754
755 static int be_get_vf_config(struct net_device *netdev, int vf,
756 struct ifla_vf_info *vi)
757 {
758 struct be_adapter *adapter = netdev_priv(netdev);
759
760 if (!adapter->sriov_enabled)
761 return -EPERM;
762
763 if (vf >= num_vfs)
764 return -EINVAL;
765
766 vi->vf = vf;
767 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
768 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
769 vi->qos = 0;
770 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
771
772 return 0;
773 }
774
775 static int be_set_vf_vlan(struct net_device *netdev,
776 int vf, u16 vlan, u8 qos)
777 {
778 struct be_adapter *adapter = netdev_priv(netdev);
779 int status = 0;
780
781 if (!adapter->sriov_enabled)
782 return -EPERM;
783
784 if ((vf >= num_vfs) || (vlan > 4095))
785 return -EINVAL;
786
787 if (vlan) {
788 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
789 adapter->vlans_added++;
790 } else {
791 adapter->vf_cfg[vf].vf_vlan_tag = 0;
792 adapter->vlans_added--;
793 }
794
795 status = be_vid_config(adapter, true, vf);
796
797 if (status)
798 dev_info(&adapter->pdev->dev,
799 "VLAN %d config on VF %d failed\n", vlan, vf);
800 return status;
801 }
802
803 static int be_set_vf_tx_rate(struct net_device *netdev,
804 int vf, int rate)
805 {
806 struct be_adapter *adapter = netdev_priv(netdev);
807 int status = 0;
808
809 if (!adapter->sriov_enabled)
810 return -EPERM;
811
812 if ((vf >= num_vfs) || (rate < 0))
813 return -EINVAL;
814
815 if (rate > 10000)
816 rate = 10000;
817
818 adapter->vf_cfg[vf].vf_tx_rate = rate;
819 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
820
821 if (status)
822 dev_info(&adapter->pdev->dev,
823 "tx rate %d on VF %d failed\n", rate, vf);
824 return status;
825 }
826
827 static void be_rx_rate_update(struct be_rx_obj *rxo)
828 {
829 struct be_rx_stats *stats = &rxo->stats;
830 ulong now = jiffies;
831
832 /* Wrapped around */
833 if (time_before(now, stats->rx_jiffies)) {
834 stats->rx_jiffies = now;
835 return;
836 }
837
838 /* Update the rate once in two seconds */
839 if ((now - stats->rx_jiffies) < 2 * HZ)
840 return;
841
842 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
843 now - stats->rx_jiffies);
844 stats->rx_jiffies = now;
845 stats->rx_bytes_prev = stats->rx_bytes;
846 }
847
848 static void be_rx_stats_update(struct be_rx_obj *rxo,
849 struct be_rx_compl_info *rxcp)
850 {
851 struct be_rx_stats *stats = &rxo->stats;
852
853 stats->rx_compl++;
854 stats->rx_frags += rxcp->num_rcvd;
855 stats->rx_bytes += rxcp->pkt_size;
856 stats->rx_pkts++;
857 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
858 stats->rx_mcast_pkts++;
859 if (rxcp->err)
860 stats->rxcp_err++;
861 }
862
863 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
864 {
865 /* L4 checksum is not reliable for non TCP/UDP packets.
866 * Also ignore ipcksm for ipv6 pkts */
867 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
868 (rxcp->ip_csum || rxcp->ipv6);
869 }
870
871 static struct be_rx_page_info *
872 get_rx_page_info(struct be_adapter *adapter,
873 struct be_rx_obj *rxo,
874 u16 frag_idx)
875 {
876 struct be_rx_page_info *rx_page_info;
877 struct be_queue_info *rxq = &rxo->q;
878
879 rx_page_info = &rxo->page_info_tbl[frag_idx];
880 BUG_ON(!rx_page_info->page);
881
882 if (rx_page_info->last_page_user) {
883 dma_unmap_page(&adapter->pdev->dev,
884 dma_unmap_addr(rx_page_info, bus),
885 adapter->big_page_size, DMA_FROM_DEVICE);
886 rx_page_info->last_page_user = false;
887 }
888
889 atomic_dec(&rxq->used);
890 return rx_page_info;
891 }
892
893 /* Throw away the data in the Rx completion */
894 static void be_rx_compl_discard(struct be_adapter *adapter,
895 struct be_rx_obj *rxo,
896 struct be_rx_compl_info *rxcp)
897 {
898 struct be_queue_info *rxq = &rxo->q;
899 struct be_rx_page_info *page_info;
900 u16 i, num_rcvd = rxcp->num_rcvd;
901
902 for (i = 0; i < num_rcvd; i++) {
903 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
904 put_page(page_info->page);
905 memset(page_info, 0, sizeof(*page_info));
906 index_inc(&rxcp->rxq_idx, rxq->len);
907 }
908 }
909
910 /*
911 * skb_fill_rx_data forms a complete skb for an ether frame
912 * indicated by rxcp.
913 */
914 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
915 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
916 {
917 struct be_queue_info *rxq = &rxo->q;
918 struct be_rx_page_info *page_info;
919 u16 i, j;
920 u16 hdr_len, curr_frag_len, remaining;
921 u8 *start;
922
923 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
924 start = page_address(page_info->page) + page_info->page_offset;
925 prefetch(start);
926
927 /* Copy data in the first descriptor of this completion */
928 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
929
930 /* Copy the header portion into skb_data */
931 hdr_len = min(BE_HDR_LEN, curr_frag_len);
932 memcpy(skb->data, start, hdr_len);
933 skb->len = curr_frag_len;
934 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
935 /* Complete packet has now been moved to data */
936 put_page(page_info->page);
937 skb->data_len = 0;
938 skb->tail += curr_frag_len;
939 } else {
940 skb_shinfo(skb)->nr_frags = 1;
941 skb_shinfo(skb)->frags[0].page = page_info->page;
942 skb_shinfo(skb)->frags[0].page_offset =
943 page_info->page_offset + hdr_len;
944 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
945 skb->data_len = curr_frag_len - hdr_len;
946 skb->tail += hdr_len;
947 }
948 page_info->page = NULL;
949
950 if (rxcp->pkt_size <= rx_frag_size) {
951 BUG_ON(rxcp->num_rcvd != 1);
952 return;
953 }
954
955 /* More frags present for this completion */
956 index_inc(&rxcp->rxq_idx, rxq->len);
957 remaining = rxcp->pkt_size - curr_frag_len;
958 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
959 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
960 curr_frag_len = min(remaining, rx_frag_size);
961
962 /* Coalesce all frags from the same physical page in one slot */
963 if (page_info->page_offset == 0) {
964 /* Fresh page */
965 j++;
966 skb_shinfo(skb)->frags[j].page = page_info->page;
967 skb_shinfo(skb)->frags[j].page_offset =
968 page_info->page_offset;
969 skb_shinfo(skb)->frags[j].size = 0;
970 skb_shinfo(skb)->nr_frags++;
971 } else {
972 put_page(page_info->page);
973 }
974
975 skb_shinfo(skb)->frags[j].size += curr_frag_len;
976 skb->len += curr_frag_len;
977 skb->data_len += curr_frag_len;
978
979 remaining -= curr_frag_len;
980 index_inc(&rxcp->rxq_idx, rxq->len);
981 page_info->page = NULL;
982 }
983 BUG_ON(j > MAX_SKB_FRAGS);
984 }
985
986 /* Process the RX completion indicated by rxcp when GRO is disabled */
987 static void be_rx_compl_process(struct be_adapter *adapter,
988 struct be_rx_obj *rxo,
989 struct be_rx_compl_info *rxcp)
990 {
991 struct net_device *netdev = adapter->netdev;
992 struct sk_buff *skb;
993
994 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
995 if (unlikely(!skb)) {
996 if (net_ratelimit())
997 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
998 be_rx_compl_discard(adapter, rxo, rxcp);
999 return;
1000 }
1001
1002 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1003
1004 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1005 skb->ip_summed = CHECKSUM_UNNECESSARY;
1006 else
1007 skb_checksum_none_assert(skb);
1008
1009 skb->truesize = skb->len + sizeof(struct sk_buff);
1010 skb->protocol = eth_type_trans(skb, netdev);
1011 if (adapter->netdev->features & NETIF_F_RXHASH)
1012 skb->rxhash = rxcp->rss_hash;
1013
1014
1015 if (unlikely(rxcp->vlanf)) {
1016 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1017 kfree_skb(skb);
1018 return;
1019 }
1020 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1021 rxcp->vlan_tag);
1022 } else {
1023 netif_receive_skb(skb);
1024 }
1025 }
1026
1027 /* Process the RX completion indicated by rxcp when GRO is enabled */
1028 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1029 struct be_rx_obj *rxo,
1030 struct be_rx_compl_info *rxcp)
1031 {
1032 struct be_rx_page_info *page_info;
1033 struct sk_buff *skb = NULL;
1034 struct be_queue_info *rxq = &rxo->q;
1035 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1036 u16 remaining, curr_frag_len;
1037 u16 i, j;
1038
1039 skb = napi_get_frags(&eq_obj->napi);
1040 if (!skb) {
1041 be_rx_compl_discard(adapter, rxo, rxcp);
1042 return;
1043 }
1044
1045 remaining = rxcp->pkt_size;
1046 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1047 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1048
1049 curr_frag_len = min(remaining, rx_frag_size);
1050
1051 /* Coalesce all frags from the same physical page in one slot */
1052 if (i == 0 || page_info->page_offset == 0) {
1053 /* First frag or Fresh page */
1054 j++;
1055 skb_shinfo(skb)->frags[j].page = page_info->page;
1056 skb_shinfo(skb)->frags[j].page_offset =
1057 page_info->page_offset;
1058 skb_shinfo(skb)->frags[j].size = 0;
1059 } else {
1060 put_page(page_info->page);
1061 }
1062 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1063
1064 remaining -= curr_frag_len;
1065 index_inc(&rxcp->rxq_idx, rxq->len);
1066 memset(page_info, 0, sizeof(*page_info));
1067 }
1068 BUG_ON(j > MAX_SKB_FRAGS);
1069
1070 skb_shinfo(skb)->nr_frags = j + 1;
1071 skb->len = rxcp->pkt_size;
1072 skb->data_len = rxcp->pkt_size;
1073 skb->truesize += rxcp->pkt_size;
1074 skb->ip_summed = CHECKSUM_UNNECESSARY;
1075 if (adapter->netdev->features & NETIF_F_RXHASH)
1076 skb->rxhash = rxcp->rss_hash;
1077
1078 if (likely(!rxcp->vlanf))
1079 napi_gro_frags(&eq_obj->napi);
1080 else
1081 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
1082 rxcp->vlan_tag);
1083 }
1084
1085 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1086 struct be_eth_rx_compl *compl,
1087 struct be_rx_compl_info *rxcp)
1088 {
1089 rxcp->pkt_size =
1090 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1091 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1092 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1093 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1094 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1095 rxcp->ip_csum =
1096 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1097 rxcp->l4_csum =
1098 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1099 rxcp->ipv6 =
1100 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1101 rxcp->rxq_idx =
1102 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1103 rxcp->num_rcvd =
1104 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1105 rxcp->pkt_type =
1106 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1107 rxcp->rss_hash =
1108 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1109 if (rxcp->vlanf) {
1110 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1111 compl);
1112 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1113 compl);
1114 }
1115 }
1116
1117 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1118 struct be_eth_rx_compl *compl,
1119 struct be_rx_compl_info *rxcp)
1120 {
1121 rxcp->pkt_size =
1122 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1123 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1124 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1125 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1126 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1127 rxcp->ip_csum =
1128 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1129 rxcp->l4_csum =
1130 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1131 rxcp->ipv6 =
1132 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1133 rxcp->rxq_idx =
1134 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1135 rxcp->num_rcvd =
1136 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1137 rxcp->pkt_type =
1138 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1139 rxcp->rss_hash =
1140 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1141 if (rxcp->vlanf) {
1142 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1143 compl);
1144 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1145 compl);
1146 }
1147 }
1148
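/* Return the next valid rx completion from the rx CQ, parsed into
 * rxo->rxcp, or NULL if none is pending
 */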
1149 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1150 {
1151 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1152 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1153 struct be_adapter *adapter = rxo->adapter;
1154
1155 /* For checking the valid bit it is Ok to use either definition as the
1156 * valid bit is at the same position in both v0 and v1 Rx compl */
1157 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1158 return NULL;
1159
1160 rmb();
1161 be_dws_le_to_cpu(compl, sizeof(*compl));
1162
1163 if (adapter->be3_native)
1164 be_parse_rx_compl_v1(adapter, compl, rxcp);
1165 else
1166 be_parse_rx_compl_v0(adapter, compl, rxcp);
1167
1168 if (rxcp->vlanf) {
1169 /* vlanf could be wrongly set in some cards.
1170 * ignore if vtm is not set */
1171 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1172 rxcp->vlanf = 0;
1173
1174 if (!lancer_chip(adapter))
1175 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1176
1177 if (((adapter->pvid & VLAN_VID_MASK) ==
1178 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1179 !adapter->vlan_tag[rxcp->vlan_tag])
1180 rxcp->vlanf = 0;
1181 }
1182
1183 /* As the compl has been parsed, reset it; we won't touch it again */
1184 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1185
1186 queue_tail_inc(&rxo->cq);
1187 return rxcp;
1188 }
1189
1190 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1191 {
1192 u32 order = get_order(size);
1193
1194 if (order > 0)
1195 gfp |= __GFP_COMP;
1196 return alloc_pages(gfp, order);
1197 }
1198
1199 /*
1200 * Allocate a page, split it into fragments of size rx_frag_size and post as
1201 * receive buffers to BE
1202 */
1203 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1204 {
1205 struct be_adapter *adapter = rxo->adapter;
1206 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1207 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1208 struct be_queue_info *rxq = &rxo->q;
1209 struct page *pagep = NULL;
1210 struct be_eth_rx_d *rxd;
1211 u64 page_dmaaddr = 0, frag_dmaaddr;
1212 u32 posted, page_offset = 0;
1213
1214 page_info = &rxo->page_info_tbl[rxq->head];
1215 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1216 if (!pagep) {
1217 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1218 if (unlikely(!pagep)) {
1219 rxo->stats.rx_post_fail++;
1220 break;
1221 }
1222 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1223 0, adapter->big_page_size,
1224 DMA_FROM_DEVICE);
1225 page_info->page_offset = 0;
1226 } else {
1227 get_page(pagep);
1228 page_info->page_offset = page_offset + rx_frag_size;
1229 }
1230 page_offset = page_info->page_offset;
1231 page_info->page = pagep;
1232 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1233 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1234
1235 rxd = queue_head_node(rxq);
1236 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1237 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1238
1239 /* Any space left in the current big page for another frag? */
1240 if ((page_offset + rx_frag_size + rx_frag_size) >
1241 adapter->big_page_size) {
1242 pagep = NULL;
1243 page_info->last_page_user = true;
1244 }
1245
1246 prev_page_info = page_info;
1247 queue_head_inc(rxq);
1248 page_info = &page_info_tbl[rxq->head];
1249 }
1250 if (pagep)
1251 prev_page_info->last_page_user = true;
1252
1253 if (posted) {
1254 atomic_add(posted, &rxq->used);
1255 be_rxq_notify(adapter, rxq->id, posted);
1256 } else if (atomic_read(&rxq->used) == 0) {
1257 /* Let be_worker replenish when memory is available */
1258 rxo->rx_post_starved = true;
1259 }
1260 }
1261
1262 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1263 {
1264 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1265
1266 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1267 return NULL;
1268
1269 rmb();
1270 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1271
1272 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1273
1274 queue_tail_inc(tx_cq);
1275 return txcp;
1276 }
1277
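/* Unmap and free the skb whose WRBs end at 'last_index' in the tx queue;
 * returns the number of WRBs consumed, including the header WRB
 */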
1278 static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1279 {
1280 struct be_queue_info *txq = &adapter->tx_obj.q;
1281 struct be_eth_wrb *wrb;
1282 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1283 struct sk_buff *sent_skb;
1284 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1285 bool unmap_skb_hdr = true;
1286
1287 sent_skb = sent_skbs[txq->tail];
1288 BUG_ON(!sent_skb);
1289 sent_skbs[txq->tail] = NULL;
1290
1291 /* skip header wrb */
1292 queue_tail_inc(txq);
1293
1294 do {
1295 cur_index = txq->tail;
1296 wrb = queue_tail_node(txq);
1297 unmap_tx_frag(&adapter->pdev->dev, wrb,
1298 (unmap_skb_hdr && skb_headlen(sent_skb)));
1299 unmap_skb_hdr = false;
1300
1301 num_wrbs++;
1302 queue_tail_inc(txq);
1303 } while (cur_index != last_index);
1304
1305 kfree_skb(sent_skb);
1306 return num_wrbs;
1307 }
1308
1309 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1310 {
1311 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1312
1313 if (!eqe->evt)
1314 return NULL;
1315
1316 rmb();
1317 eqe->evt = le32_to_cpu(eqe->evt);
1318 queue_tail_inc(&eq_obj->q);
1319 return eqe;
1320 }
1321
1322 static int event_handle(struct be_adapter *adapter,
1323 struct be_eq_obj *eq_obj)
1324 {
1325 struct be_eq_entry *eqe;
1326 u16 num = 0;
1327
1328 while ((eqe = event_get(eq_obj)) != NULL) {
1329 eqe->evt = 0;
1330 num++;
1331 }
1332
1333 /* Deal with any spurious interrupts that come
1334 * without events
1335 */
1336 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1337 if (num)
1338 napi_schedule(&eq_obj->napi);
1339
1340 return num;
1341 }
1342
1343 /* Just read and notify events without processing them.
1344 * Used at the time of destroying event queues */
1345 static void be_eq_clean(struct be_adapter *adapter,
1346 struct be_eq_obj *eq_obj)
1347 {
1348 struct be_eq_entry *eqe;
1349 u16 num = 0;
1350
1351 while ((eqe = event_get(eq_obj)) != NULL) {
1352 eqe->evt = 0;
1353 num++;
1354 }
1355
1356 if (num)
1357 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1358 }
1359
1360 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1361 {
1362 struct be_rx_page_info *page_info;
1363 struct be_queue_info *rxq = &rxo->q;
1364 struct be_queue_info *rx_cq = &rxo->cq;
1365 struct be_rx_compl_info *rxcp;
1366 u16 tail;
1367
1368 /* First cleanup pending rx completions */
1369 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1370 be_rx_compl_discard(adapter, rxo, rxcp);
1371 be_cq_notify(adapter, rx_cq->id, false, 1);
1372 }
1373
1374 /* Then free posted rx buffers that were not used */
1375 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1376 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1377 page_info = get_rx_page_info(adapter, rxo, tail);
1378 put_page(page_info->page);
1379 memset(page_info, 0, sizeof(*page_info));
1380 }
1381 BUG_ON(atomic_read(&rxq->used));
1382 }
1383
1384 static void be_tx_compl_clean(struct be_adapter *adapter)
1385 {
1386 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1387 struct be_queue_info *txq = &adapter->tx_obj.q;
1388 struct be_eth_tx_compl *txcp;
1389 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1390 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1391 struct sk_buff *sent_skb;
1392 bool dummy_wrb;
1393
1394 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1395 do {
1396 while ((txcp = be_tx_compl_get(tx_cq))) {
1397 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1398 wrb_index, txcp);
1399 num_wrbs += be_tx_compl_process(adapter, end_idx);
1400 cmpl++;
1401 }
1402 if (cmpl) {
1403 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1404 atomic_sub(num_wrbs, &txq->used);
1405 cmpl = 0;
1406 num_wrbs = 0;
1407 }
1408
1409 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1410 break;
1411
1412 mdelay(1);
1413 } while (true);
1414
1415 if (atomic_read(&txq->used))
1416 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1417 atomic_read(&txq->used));
1418
1419 /* free posted tx for which compls will never arrive */
1420 while (atomic_read(&txq->used)) {
1421 sent_skb = sent_skbs[txq->tail];
1422 end_idx = txq->tail;
1423 index_adv(&end_idx,
1424 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1425 txq->len);
1426 num_wrbs = be_tx_compl_process(adapter, end_idx);
1427 atomic_sub(num_wrbs, &txq->used);
1428 }
1429 }
1430
1431 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1432 {
1433 struct be_queue_info *q;
1434
1435 q = &adapter->mcc_obj.q;
1436 if (q->created)
1437 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1438 be_queue_free(adapter, q);
1439
1440 q = &adapter->mcc_obj.cq;
1441 if (q->created)
1442 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1443 be_queue_free(adapter, q);
1444 }
1445
1446 /* Must be called only after TX qs are created as MCC shares TX EQ */
1447 static int be_mcc_queues_create(struct be_adapter *adapter)
1448 {
1449 struct be_queue_info *q, *cq;
1450
1451 /* Alloc MCC compl queue */
1452 cq = &adapter->mcc_obj.cq;
1453 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1454 sizeof(struct be_mcc_compl)))
1455 goto err;
1456
1457 /* Ask BE to create MCC compl queue; share TX's eq */
1458 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1459 goto mcc_cq_free;
1460
1461 /* Alloc MCC queue */
1462 q = &adapter->mcc_obj.q;
1463 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1464 goto mcc_cq_destroy;
1465
1466 /* Ask BE to create MCC queue */
1467 if (be_cmd_mccq_create(adapter, q, cq))
1468 goto mcc_q_free;
1469
1470 return 0;
1471
1472 mcc_q_free:
1473 be_queue_free(adapter, q);
1474 mcc_cq_destroy:
1475 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1476 mcc_cq_free:
1477 be_queue_free(adapter, cq);
1478 err:
1479 return -1;
1480 }
1481
1482 static void be_tx_queues_destroy(struct be_adapter *adapter)
1483 {
1484 struct be_queue_info *q;
1485
1486 q = &adapter->tx_obj.q;
1487 if (q->created)
1488 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1489 be_queue_free(adapter, q);
1490
1491 q = &adapter->tx_obj.cq;
1492 if (q->created)
1493 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1494 be_queue_free(adapter, q);
1495
1496 /* Clear any residual events */
1497 be_eq_clean(adapter, &adapter->tx_eq);
1498
1499 q = &adapter->tx_eq.q;
1500 if (q->created)
1501 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1502 be_queue_free(adapter, q);
1503 }
1504
1505 static int be_tx_queues_create(struct be_adapter *adapter)
1506 {
1507 struct be_queue_info *eq, *q, *cq;
1508
1509 adapter->tx_eq.max_eqd = 0;
1510 adapter->tx_eq.min_eqd = 0;
1511 adapter->tx_eq.cur_eqd = 96;
1512 adapter->tx_eq.enable_aic = false;
1513 /* Alloc Tx Event queue */
1514 eq = &adapter->tx_eq.q;
1515 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1516 return -1;
1517
1518 /* Ask BE to create Tx Event queue */
1519 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1520 goto tx_eq_free;
1521
1522 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1523
1524
1525 /* Alloc TX eth compl queue */
1526 cq = &adapter->tx_obj.cq;
1527 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1528 sizeof(struct be_eth_tx_compl)))
1529 goto tx_eq_destroy;
1530
1531 /* Ask BE to create Tx eth compl queue */
1532 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1533 goto tx_cq_free;
1534
1535 /* Alloc TX eth queue */
1536 q = &adapter->tx_obj.q;
1537 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1538 goto tx_cq_destroy;
1539
1540 /* Ask BE to create Tx eth queue */
1541 if (be_cmd_txq_create(adapter, q, cq))
1542 goto tx_q_free;
1543 return 0;
1544
1545 tx_q_free:
1546 be_queue_free(adapter, q);
1547 tx_cq_destroy:
1548 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1549 tx_cq_free:
1550 be_queue_free(adapter, cq);
1551 tx_eq_destroy:
1552 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1553 tx_eq_free:
1554 be_queue_free(adapter, eq);
1555 return -1;
1556 }
1557
1558 static void be_rx_queues_destroy(struct be_adapter *adapter)
1559 {
1560 struct be_queue_info *q;
1561 struct be_rx_obj *rxo;
1562 int i;
1563
1564 for_all_rx_queues(adapter, rxo, i) {
1565 q = &rxo->q;
1566 if (q->created) {
1567 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1568 /* After the rxq is invalidated, wait for a grace time
1569 * of 1ms for all dma to end and the flush compl to
1570 * arrive
1571 */
1572 mdelay(1);
1573 be_rx_q_clean(adapter, rxo);
1574 }
1575 be_queue_free(adapter, q);
1576
1577 q = &rxo->cq;
1578 if (q->created)
1579 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1580 be_queue_free(adapter, q);
1581
1582 /* Clear any residual events */
1583 q = &rxo->rx_eq.q;
1584 if (q->created) {
1585 be_eq_clean(adapter, &rxo->rx_eq);
1586 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1587 }
1588 be_queue_free(adapter, q);
1589 }
1590 }
1591
1592 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1593 {
1594 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1595 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1596 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1597 } else {
1598 dev_warn(&adapter->pdev->dev,
1599 "No support for multiple RX queues\n");
1600 return 1;
1601 }
1602 }
1603
1604 static int be_rx_queues_create(struct be_adapter *adapter)
1605 {
1606 struct be_queue_info *eq, *q, *cq;
1607 struct be_rx_obj *rxo;
1608 int rc, i;
1609
1610 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1611 msix_enabled(adapter) ?
1612 adapter->num_msix_vec - 1 : 1);
1613 if (adapter->num_rx_qs != MAX_RX_QS)
1614 dev_warn(&adapter->pdev->dev,
1615 "Can create only %d RX queues", adapter->num_rx_qs);
1616
1617 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1618 for_all_rx_queues(adapter, rxo, i) {
1619 rxo->adapter = adapter;
1620 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1621 rxo->rx_eq.enable_aic = true;
1622
1623 /* EQ */
1624 eq = &rxo->rx_eq.q;
1625 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1626 sizeof(struct be_eq_entry));
1627 if (rc)
1628 goto err;
1629
1630 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1631 if (rc)
1632 goto err;
1633
1634 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1635
1636 /* CQ */
1637 cq = &rxo->cq;
1638 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1639 sizeof(struct be_eth_rx_compl));
1640 if (rc)
1641 goto err;
1642
1643 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1644 if (rc)
1645 goto err;
1646 /* Rx Q */
1647 q = &rxo->q;
1648 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1649 sizeof(struct be_eth_rx_d));
1650 if (rc)
1651 goto err;
1652
1653 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1654 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1655 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1656 if (rc)
1657 goto err;
1658 }
1659
1660 if (be_multi_rxq(adapter)) {
1661 u8 rsstable[MAX_RSS_QS];
1662
1663 for_all_rss_queues(adapter, rxo, i)
1664 rsstable[i] = rxo->rss_id;
1665
1666 rc = be_cmd_rss_config(adapter, rsstable,
1667 adapter->num_rx_qs - 1);
1668 if (rc)
1669 goto err;
1670 }
1671
1672 return 0;
1673 err:
1674 be_rx_queues_destroy(adapter);
1675 return -1;
1676 }
1677
1678 static bool event_peek(struct be_eq_obj *eq_obj)
1679 {
1680 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1681 if (!eqe->evt)
1682 return false;
1683 else
1684 return true;
1685 }
1686
1687 static irqreturn_t be_intx(int irq, void *dev)
1688 {
1689 struct be_adapter *adapter = dev;
1690 struct be_rx_obj *rxo;
1691 int isr, i, tx = 0 , rx = 0;
1692
1693 if (lancer_chip(adapter)) {
1694 if (event_peek(&adapter->tx_eq))
1695 tx = event_handle(adapter, &adapter->tx_eq);
1696 for_all_rx_queues(adapter, rxo, i) {
1697 if (event_peek(&rxo->rx_eq))
1698 rx |= event_handle(adapter, &rxo->rx_eq);
1699 }
1700
1701 if (!(tx || rx))
1702 return IRQ_NONE;
1703
1704 } else {
1705 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1706 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1707 if (!isr)
1708 return IRQ_NONE;
1709
1710 if ((1 << adapter->tx_eq.eq_idx & isr))
1711 event_handle(adapter, &adapter->tx_eq);
1712
1713 for_all_rx_queues(adapter, rxo, i) {
1714 if ((1 << rxo->rx_eq.eq_idx & isr))
1715 event_handle(adapter, &rxo->rx_eq);
1716 }
1717 }
1718
1719 return IRQ_HANDLED;
1720 }
1721
1722 static irqreturn_t be_msix_rx(int irq, void *dev)
1723 {
1724 struct be_rx_obj *rxo = dev;
1725 struct be_adapter *adapter = rxo->adapter;
1726
1727 event_handle(adapter, &rxo->rx_eq);
1728
1729 return IRQ_HANDLED;
1730 }
1731
1732 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1733 {
1734 struct be_adapter *adapter = dev;
1735
1736 event_handle(adapter, &adapter->tx_eq);
1737
1738 return IRQ_HANDLED;
1739 }
1740
1741 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1742 {
1743 return (rxcp->tcpf && !rxcp->err) ? true : false;
1744 }
1745
1746 static int be_poll_rx(struct napi_struct *napi, int budget)
1747 {
1748 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1749 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1750 struct be_adapter *adapter = rxo->adapter;
1751 struct be_queue_info *rx_cq = &rxo->cq;
1752 struct be_rx_compl_info *rxcp;
1753 u32 work_done;
1754
1755 rxo->stats.rx_polls++;
1756 for (work_done = 0; work_done < budget; work_done++) {
1757 rxcp = be_rx_compl_get(rxo);
1758 if (!rxcp)
1759 break;
1760
1761 /* Ignore flush completions */
1762 if (rxcp->num_rcvd && rxcp->pkt_size) {
1763 if (do_gro(rxcp))
1764 be_rx_compl_process_gro(adapter, rxo, rxcp);
1765 else
1766 be_rx_compl_process(adapter, rxo, rxcp);
1767 } else if (rxcp->pkt_size == 0) {
1768 be_rx_compl_discard(adapter, rxo, rxcp);
1769 }
1770
1771 be_rx_stats_update(rxo, rxcp);
1772 }
1773
1774 /* Refill the queue */
1775 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1776 be_post_rx_frags(rxo, GFP_ATOMIC);
1777
1778 /* All consumed */
1779 if (work_done < budget) {
1780 napi_complete(napi);
1781 be_cq_notify(adapter, rx_cq->id, true, work_done);
1782 } else {
1783 /* More to be consumed; continue with interrupts disabled */
1784 be_cq_notify(adapter, rx_cq->id, false, work_done);
1785 }
1786 return work_done;
1787 }
1788
1789 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1790 * For TX/MCC we don't honour budget; consume everything
1791 */
1792 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1793 {
1794 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1795 struct be_adapter *adapter =
1796 container_of(tx_eq, struct be_adapter, tx_eq);
1797 struct be_queue_info *txq = &adapter->tx_obj.q;
1798 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1799 struct be_eth_tx_compl *txcp;
1800 int tx_compl = 0, mcc_compl, status = 0;
1801 u16 end_idx, num_wrbs = 0;
1802
1803 while ((txcp = be_tx_compl_get(tx_cq))) {
1804 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1805 wrb_index, txcp);
1806 num_wrbs += be_tx_compl_process(adapter, end_idx);
1807 tx_compl++;
1808 }
1809
1810 mcc_compl = be_process_mcc(adapter, &status);
1811
1812 napi_complete(napi);
1813
1814 if (mcc_compl) {
1815 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1816 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1817 }
1818
1819 if (tx_compl) {
1820 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1821
1822 atomic_sub(num_wrbs, &txq->used);
1823
1824 /* As Tx wrbs have been freed up, wake up netdev queue if
1825 * it was stopped due to lack of tx wrbs.
1826 */
1827 if (netif_queue_stopped(adapter->netdev) &&
1828 atomic_read(&txq->used) < txq->len / 2) {
1829 netif_wake_queue(adapter->netdev);
1830 }
1831
1832 tx_stats(adapter)->be_tx_events++;
1833 tx_stats(adapter)->be_tx_compl += tx_compl;
1834 }
1835
1836 return 1;
1837 }
1838
1839 void be_detect_dump_ue(struct be_adapter *adapter)
1840 {
1841 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1842 u32 i;
1843
1844 pci_read_config_dword(adapter->pdev,
1845 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1846 pci_read_config_dword(adapter->pdev,
1847 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1848 pci_read_config_dword(adapter->pdev,
1849 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1850 pci_read_config_dword(adapter->pdev,
1851 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1852
1853 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1854 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1855
1856 if (ue_status_lo || ue_status_hi) {
1857 adapter->ue_detected = true;
1858 adapter->eeh_err = true;
1859 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1860 }
1861
1862 if (ue_status_lo) {
1863 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1864 if (ue_status_lo & 1)
1865 dev_err(&adapter->pdev->dev,
1866 "UE: %s bit set\n", ue_status_low_desc[i]);
1867 }
1868 }
1869 if (ue_status_hi) {
1870 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1871 if (ue_status_hi & 1)
1872 dev_err(&adapter->pdev->dev,
1873 "UE: %s bit set\n", ue_status_hi_desc[i]);
1874 }
1875 }
1876
1877 }
1878
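/* Periodic housekeeping: detect unrecoverable errors, reap MCC completions
 * when the interface is down, refresh stats/rates and replenish any
 * starved rx queues; reschedules itself every second
 */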
1879 static void be_worker(struct work_struct *work)
1880 {
1881 struct be_adapter *adapter =
1882 container_of(work, struct be_adapter, work.work);
1883 struct be_rx_obj *rxo;
1884 int i;
1885
1886 if (!adapter->ue_detected && !lancer_chip(adapter))
1887 be_detect_dump_ue(adapter);
1888
1889 /* when interrupts are not yet enabled, just reap any pending
1890 * mcc completions */
1891 if (!netif_running(adapter->netdev)) {
1892 int mcc_compl, status = 0;
1893
1894 mcc_compl = be_process_mcc(adapter, &status);
1895
1896 if (mcc_compl) {
1897 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1898 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1899 }
1900
1901 goto reschedule;
1902 }
1903
1904 if (!adapter->stats_cmd_sent)
1905 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1906
1907 be_tx_rate_update(adapter);
1908
1909 for_all_rx_queues(adapter, rxo, i) {
1910 be_rx_rate_update(rxo);
1911 be_rx_eqd_update(adapter, rxo);
1912
1913 if (rxo->rx_post_starved) {
1914 rxo->rx_post_starved = false;
1915 be_post_rx_frags(rxo, GFP_KERNEL);
1916 }
1917 }
1918
1919 reschedule:
1920 adapter->work_counter++;
1921 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1922 }
1923
1924 static void be_msix_disable(struct be_adapter *adapter)
1925 {
1926 if (msix_enabled(adapter)) {
1927 pci_disable_msix(adapter->pdev);
1928 adapter->num_msix_vec = 0;
1929 }
1930 }
1931
1932 static void be_msix_enable(struct be_adapter *adapter)
1933 {
1934 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
1935 int i, status, num_vec;
1936
1937 num_vec = be_num_rxqs_want(adapter) + 1;
1938
1939 for (i = 0; i < num_vec; i++)
1940 adapter->msix_entries[i].entry = i;
1941
1942 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
1943 if (status == 0) {
1944 goto done;
1945 } else if (status >= BE_MIN_MSIX_VECTORS) {
1946 num_vec = status;
1947 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1948 num_vec) == 0)
1949 goto done;
1950 }
1951 return;
1952 done:
1953 adapter->num_msix_vec = num_vec;
1954 return;
1955 }
1956
1957 static void be_sriov_enable(struct be_adapter *adapter)
1958 {
1959 be_check_sriov_fn_type(adapter);
1960 #ifdef CONFIG_PCI_IOV
1961 if (be_physfn(adapter) && num_vfs) {
1962 int status, pos;
1963 u16 nvfs;
1964
1965 pos = pci_find_ext_capability(adapter->pdev,
1966 PCI_EXT_CAP_ID_SRIOV);
1967 pci_read_config_word(adapter->pdev,
1968 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
1969
1970 if (num_vfs > nvfs) {
1971 dev_info(&adapter->pdev->dev,
1972 "Device supports %d VFs and not %d\n",
1973 nvfs, num_vfs);
1974 num_vfs = nvfs;
1975 }
1976
1977 status = pci_enable_sriov(adapter->pdev, num_vfs);
1978 adapter->sriov_enabled = status ? false : true;
1979 }
1980 #endif
1981 }
1982
1983 static void be_sriov_disable(struct be_adapter *adapter)
1984 {
1985 #ifdef CONFIG_PCI_IOV
1986 if (adapter->sriov_enabled) {
1987 pci_disable_sriov(adapter->pdev);
1988 adapter->sriov_enabled = false;
1989 }
1990 #endif
1991 }
1992
1993 static inline int be_msix_vec_get(struct be_adapter *adapter,
1994 struct be_eq_obj *eq_obj)
1995 {
1996 return adapter->msix_entries[eq_obj->eq_idx].vector;
1997 }
1998
1999 static int be_request_irq(struct be_adapter *adapter,
2000 struct be_eq_obj *eq_obj,
2001 void *handler, char *desc, void *context)
2002 {
2003 struct net_device *netdev = adapter->netdev;
2004 int vec;
2005
2006 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2007 vec = be_msix_vec_get(adapter, eq_obj);
2008 return request_irq(vec, handler, 0, eq_obj->desc, context);
2009 }
2010
2011 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2012 void *context)
2013 {
2014 int vec = be_msix_vec_get(adapter, eq_obj);
2015 free_irq(vec, context);
2016 }
2017
2018 static int be_msix_register(struct be_adapter *adapter)
2019 {
2020 struct be_rx_obj *rxo;
2021 int status, i;
2022 char qname[10];
2023
2024 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2025 adapter);
2026 if (status)
2027 goto err;
2028
2029 for_all_rx_queues(adapter, rxo, i) {
2030 sprintf(qname, "rxq%d", i);
2031 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2032 qname, rxo);
2033 if (status)
2034 goto err_msix;
2035 }
2036
2037 return 0;
2038
2039 err_msix:
2040 be_free_irq(adapter, &adapter->tx_eq, adapter);
2041
2042 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2043 be_free_irq(adapter, &rxo->rx_eq, rxo);
2044
2045 err:
2046 dev_warn(&adapter->pdev->dev,
2047 "MSIX Request IRQ failed - err %d\n", status);
2048 be_msix_disable(adapter);
2049 return status;
2050 }
2051
2052 static int be_irq_register(struct be_adapter *adapter)
2053 {
2054 struct net_device *netdev = adapter->netdev;
2055 int status;
2056
2057 if (msix_enabled(adapter)) {
2058 status = be_msix_register(adapter);
2059 if (status == 0)
2060 goto done;
2061 		/* INTx is not supported for VFs */
2062 if (!be_physfn(adapter))
2063 return status;
2064 }
2065
2066 /* INTx */
2067 netdev->irq = adapter->pdev->irq;
2068 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2069 adapter);
2070 if (status) {
2071 dev_err(&adapter->pdev->dev,
2072 "INTx request IRQ failed - err %d\n", status);
2073 return status;
2074 }
2075 done:
2076 adapter->isr_registered = true;
2077 return 0;
2078 }
2079
2080 static void be_irq_unregister(struct be_adapter *adapter)
2081 {
2082 struct net_device *netdev = adapter->netdev;
2083 struct be_rx_obj *rxo;
2084 int i;
2085
2086 if (!adapter->isr_registered)
2087 return;
2088
2089 /* INTx */
2090 if (!msix_enabled(adapter)) {
2091 free_irq(netdev->irq, adapter);
2092 goto done;
2093 }
2094
2095 /* MSIx */
2096 be_free_irq(adapter, &adapter->tx_eq, adapter);
2097
2098 for_all_rx_queues(adapter, rxo, i)
2099 be_free_irq(adapter, &rxo->rx_eq, rxo);
2100
2101 done:
2102 adapter->isr_registered = false;
2103 }
2104
2105 static int be_close(struct net_device *netdev)
2106 {
2107 struct be_adapter *adapter = netdev_priv(netdev);
2108 struct be_rx_obj *rxo;
2109 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2110 int vec, i;
2111
2112 be_async_mcc_disable(adapter);
2113
2114 netif_carrier_off(netdev);
2115 adapter->link_up = false;
2116
2117 if (!lancer_chip(adapter))
2118 be_intr_set(adapter, false);
2119
2120 for_all_rx_queues(adapter, rxo, i)
2121 napi_disable(&rxo->rx_eq.napi);
2122
2123 napi_disable(&tx_eq->napi);
2124
2125 if (lancer_chip(adapter)) {
2126 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2127 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2128 for_all_rx_queues(adapter, rxo, i)
2129 be_cq_notify(adapter, rxo->cq.id, false, 0);
2130 }
2131
2132 if (msix_enabled(adapter)) {
2133 vec = be_msix_vec_get(adapter, tx_eq);
2134 synchronize_irq(vec);
2135
2136 for_all_rx_queues(adapter, rxo, i) {
2137 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2138 synchronize_irq(vec);
2139 }
2140 } else {
2141 synchronize_irq(netdev->irq);
2142 }
2143 be_irq_unregister(adapter);
2144
2145 /* Wait for all pending tx completions to arrive so that
2146 * all tx skbs are freed.
2147 */
2148 be_tx_compl_clean(adapter);
2149
2150 return 0;
2151 }
2152
2153 static int be_open(struct net_device *netdev)
2154 {
2155 struct be_adapter *adapter = netdev_priv(netdev);
2156 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2157 struct be_rx_obj *rxo;
2158 bool link_up;
2159 int status, i;
2160 u8 mac_speed;
2161 u16 link_speed;
2162
2163 for_all_rx_queues(adapter, rxo, i) {
2164 be_post_rx_frags(rxo, GFP_KERNEL);
2165 napi_enable(&rxo->rx_eq.napi);
2166 }
2167 napi_enable(&tx_eq->napi);
2168
2169 be_irq_register(adapter);
2170
2171 if (!lancer_chip(adapter))
2172 be_intr_set(adapter, true);
2173
2174 /* The evt queues are created in unarmed state; arm them */
2175 for_all_rx_queues(adapter, rxo, i) {
2176 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2177 be_cq_notify(adapter, rxo->cq.id, true, 0);
2178 }
2179 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2180
2181 /* Now that interrupts are on we can process async mcc */
2182 be_async_mcc_enable(adapter);
2183
2184 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2185 &link_speed, 0);
2186 if (status)
2187 goto err;
2188 be_link_status_update(adapter, link_up);
2189
2190 if (be_physfn(adapter)) {
2191 status = be_vid_config(adapter, false, 0);
2192 if (status)
2193 goto err;
2194
2195 status = be_cmd_set_flow_control(adapter,
2196 adapter->tx_fc, adapter->rx_fc);
2197 if (status)
2198 goto err;
2199 }
2200
2201 return 0;
2202 err:
2203 be_close(adapter->netdev);
2204 return -EIO;
2205 }
2206
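/*
 * Program magic-packet Wake-on-LAN in firmware. A DMA-coherent buffer
 * carries the ACPI WoL command; on enable the PM control register is
 * written and PCI wake is armed for D3hot/D3cold, on disable a zeroed
 * MAC clears the filter and wake is disarmed.
 */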
2207 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2208 {
2209 struct be_dma_mem cmd;
2210 int status = 0;
2211 u8 mac[ETH_ALEN];
2212
2213 memset(mac, 0, ETH_ALEN);
2214
2215 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2216 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2217 GFP_KERNEL);
2218 if (cmd.va == NULL)
2219 return -1;
2220 memset(cmd.va, 0, cmd.size);
2221
2222 if (enable) {
2223 status = pci_write_config_dword(adapter->pdev,
2224 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2225 if (status) {
2226 dev_err(&adapter->pdev->dev,
2227 "Could not enable Wake-on-lan\n");
2228 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2229 cmd.dma);
2230 return status;
2231 }
2232 status = be_cmd_enable_magic_wol(adapter,
2233 adapter->netdev->dev_addr, &cmd);
2234 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2235 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2236 } else {
2237 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2238 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2239 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2240 }
2241
2242 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2243 return status;
2244 }
2245
2246 /*
2247 * Generate a seed MAC address from the PF MAC Address using jhash.
2248  * MAC addresses for VFs are assigned incrementally starting from the seed.
2249 * These addresses are programmed in the ASIC by the PF and the VF driver
2250 * queries for the MAC address during its probe.
2251 */
2252 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2253 {
2254 u32 vf = 0;
2255 int status = 0;
2256 u8 mac[ETH_ALEN];
2257
2258 be_vf_eth_addr_generate(adapter, mac);
2259
2260 for (vf = 0; vf < num_vfs; vf++) {
2261 status = be_cmd_pmac_add(adapter, mac,
2262 adapter->vf_cfg[vf].vf_if_handle,
2263 &adapter->vf_cfg[vf].vf_pmac_id,
2264 vf + 1);
2265 if (status)
2266 dev_err(&adapter->pdev->dev,
2267 "Mac address add failed for VF %d\n", vf);
2268 else
2269 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2270
2271 mac[5] += 1;
2272 }
2273 return status;
2274 }
2275
2276 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2277 {
2278 u32 vf;
2279
2280 for (vf = 0; vf < num_vfs; vf++) {
2281 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2282 be_cmd_pmac_del(adapter,
2283 adapter->vf_cfg[vf].vf_if_handle,
2284 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2285 }
2286 }
2287
2288 static int be_setup(struct be_adapter *adapter)
2289 {
2290 struct net_device *netdev = adapter->netdev;
2291 u32 cap_flags, en_flags, vf = 0;
2292 int status;
2293 u8 mac[ETH_ALEN];
2294
2295 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2296 BE_IF_FLAGS_BROADCAST |
2297 BE_IF_FLAGS_MULTICAST;
2298
2299 if (be_physfn(adapter)) {
2300 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2301 BE_IF_FLAGS_PROMISCUOUS |
2302 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2303 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2304
2305 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2306 cap_flags |= BE_IF_FLAGS_RSS;
2307 en_flags |= BE_IF_FLAGS_RSS;
2308 }
2309 }
2310
2311 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2312 netdev->dev_addr, false/* pmac_invalid */,
2313 &adapter->if_handle, &adapter->pmac_id, 0);
2314 if (status != 0)
2315 goto do_none;
2316
2317 if (be_physfn(adapter)) {
2318 if (adapter->sriov_enabled) {
2319 while (vf < num_vfs) {
2320 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2321 BE_IF_FLAGS_BROADCAST;
2322 status = be_cmd_if_create(adapter, cap_flags,
2323 en_flags, mac, true,
2324 &adapter->vf_cfg[vf].vf_if_handle,
2325 NULL, vf+1);
2326 if (status) {
2327 dev_err(&adapter->pdev->dev,
2328 "Interface Create failed for VF %d\n",
2329 vf);
2330 goto if_destroy;
2331 }
2332 adapter->vf_cfg[vf].vf_pmac_id =
2333 BE_INVALID_PMAC_ID;
2334 vf++;
2335 }
2336 }
2337 } else {
2338 status = be_cmd_mac_addr_query(adapter, mac,
2339 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2340 if (!status) {
2341 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2342 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2343 }
2344 }
2345
2346 status = be_tx_queues_create(adapter);
2347 if (status != 0)
2348 goto if_destroy;
2349
2350 status = be_rx_queues_create(adapter);
2351 if (status != 0)
2352 goto tx_qs_destroy;
2353
2354 status = be_mcc_queues_create(adapter);
2355 if (status != 0)
2356 goto rx_qs_destroy;
2357
2358 adapter->link_speed = -1;
2359
2360 return 0;
2361
2362 rx_qs_destroy:
2363 be_rx_queues_destroy(adapter);
2364 tx_qs_destroy:
2365 be_tx_queues_destroy(adapter);
2366 if_destroy:
2367 if (be_physfn(adapter) && adapter->sriov_enabled)
2368 for (vf = 0; vf < num_vfs; vf++)
2369 if (adapter->vf_cfg[vf].vf_if_handle)
2370 be_cmd_if_destroy(adapter,
2371 adapter->vf_cfg[vf].vf_if_handle,
2372 vf + 1);
2373 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2374 do_none:
2375 return status;
2376 }
2377
2378 static int be_clear(struct be_adapter *adapter)
2379 {
2380 int vf;
2381
2382 if (be_physfn(adapter) && adapter->sriov_enabled)
2383 be_vf_eth_addr_rem(adapter);
2384
2385 be_mcc_queues_destroy(adapter);
2386 be_rx_queues_destroy(adapter);
2387 be_tx_queues_destroy(adapter);
2388 adapter->eq_next_idx = 0;
2389
2390 if (be_physfn(adapter) && adapter->sriov_enabled)
2391 for (vf = 0; vf < num_vfs; vf++)
2392 if (adapter->vf_cfg[vf].vf_if_handle)
2393 be_cmd_if_destroy(adapter,
2394 adapter->vf_cfg[vf].vf_if_handle,
2395 vf + 1);
2396
2397 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2398
2399 /* tell fw we're done with firing cmds */
2400 be_cmd_fw_clean(adapter);
2401 return 0;
2402 }
2403
2404
2405 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2406 static bool be_flash_redboot(struct be_adapter *adapter,
2407 const u8 *p, u32 img_start, int image_size,
2408 int hdr_size)
2409 {
2410 u32 crc_offset;
2411 u8 flashed_crc[4];
2412 int status;
2413
2414 crc_offset = hdr_size + img_start + image_size - 4;
2415
2416 p += crc_offset;
2417
2418 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2419 (image_size - 4));
2420 if (status) {
2421 dev_err(&adapter->pdev->dev,
2422 "could not get crc from flash, not flashing redboot\n");
2423 return false;
2424 }
2425
2426 	/* update redboot only if crc does not match */
2427 if (!memcmp(flashed_crc, p, 4))
2428 return false;
2429 else
2430 return true;
2431 }
2432
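/*
 * Walk the generation-specific flash component table and write each
 * image from the UFI file to its flash region. Data is copied into the
 * flashrom command buffer in 32KB chunks; the final chunk of a
 * component uses FLASHROM_OPER_FLASH to commit, earlier chunks use
 * FLASHROM_OPER_SAVE. Redboot is skipped when its CRC already matches
 * and the NCSI image is skipped on firmware older than 3.102.148.0.
 */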
2433 static int be_flash_data(struct be_adapter *adapter,
2434 const struct firmware *fw,
2435 struct be_dma_mem *flash_cmd, int num_of_images)
2436
2437 {
2438 int status = 0, i, filehdr_size = 0;
2439 u32 total_bytes = 0, flash_op;
2440 int num_bytes;
2441 const u8 *p = fw->data;
2442 struct be_cmd_write_flashrom *req = flash_cmd->va;
2443 const struct flash_comp *pflashcomp;
2444 int num_comp;
2445
2446 static const struct flash_comp gen3_flash_types[9] = {
2447 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2448 FLASH_IMAGE_MAX_SIZE_g3},
2449 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2450 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2451 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2452 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2453 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2454 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2455 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2456 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2457 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2458 FLASH_IMAGE_MAX_SIZE_g3},
2459 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2460 FLASH_IMAGE_MAX_SIZE_g3},
2461 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2462 FLASH_IMAGE_MAX_SIZE_g3},
2463 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2464 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2465 };
2466 static const struct flash_comp gen2_flash_types[8] = {
2467 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2468 FLASH_IMAGE_MAX_SIZE_g2},
2469 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2470 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2471 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2472 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2473 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2474 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2475 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2476 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2477 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2478 FLASH_IMAGE_MAX_SIZE_g2},
2479 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2480 FLASH_IMAGE_MAX_SIZE_g2},
2481 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2482 FLASH_IMAGE_MAX_SIZE_g2}
2483 };
2484
2485 if (adapter->generation == BE_GEN3) {
2486 pflashcomp = gen3_flash_types;
2487 filehdr_size = sizeof(struct flash_file_hdr_g3);
2488 num_comp = ARRAY_SIZE(gen3_flash_types);
2489 } else {
2490 pflashcomp = gen2_flash_types;
2491 filehdr_size = sizeof(struct flash_file_hdr_g2);
2492 num_comp = ARRAY_SIZE(gen2_flash_types);
2493 }
2494 for (i = 0; i < num_comp; i++) {
2495 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2496 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2497 continue;
2498 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2499 (!be_flash_redboot(adapter, fw->data,
2500 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2501 (num_of_images * sizeof(struct image_hdr)))))
2502 continue;
2503 p = fw->data;
2504 p += filehdr_size + pflashcomp[i].offset
2505 + (num_of_images * sizeof(struct image_hdr));
2506 if (p + pflashcomp[i].size > fw->data + fw->size)
2507 return -1;
2508 total_bytes = pflashcomp[i].size;
2509 while (total_bytes) {
2510 if (total_bytes > 32*1024)
2511 num_bytes = 32*1024;
2512 else
2513 num_bytes = total_bytes;
2514 total_bytes -= num_bytes;
2515
2516 if (!total_bytes)
2517 flash_op = FLASHROM_OPER_FLASH;
2518 else
2519 flash_op = FLASHROM_OPER_SAVE;
2520 memcpy(req->params.data_buf, p, num_bytes);
2521 p += num_bytes;
2522 status = be_cmd_write_flashrom(adapter, flash_cmd,
2523 pflashcomp[i].optype, flash_op, num_bytes);
2524 if (status) {
2525 dev_err(&adapter->pdev->dev,
2526 "cmd to write to flash rom failed.\n");
2527 return -1;
2528 }
2529 yield();
2530 }
2531 }
2532 return 0;
2533 }
2534
2535 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2536 {
2537 if (fhdr == NULL)
2538 return 0;
2539 if (fhdr->build[0] == '3')
2540 return BE_GEN3;
2541 else if (fhdr->build[0] == '2')
2542 return BE_GEN2;
2543 else
2544 return 0;
2545 }
2546
2547 int be_load_fw(struct be_adapter *adapter, u8 *func)
2548 {
2549 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2550 const struct firmware *fw;
2551 struct flash_file_hdr_g2 *fhdr;
2552 struct flash_file_hdr_g3 *fhdr3;
2553 struct image_hdr *img_hdr_ptr = NULL;
2554 struct be_dma_mem flash_cmd;
2555 int status, i = 0, num_imgs = 0;
2556 const u8 *p;
2557
2558 if (!netif_running(adapter->netdev)) {
2559 dev_err(&adapter->pdev->dev,
2560 "Firmware load not allowed (interface is down)\n");
2561 return -EPERM;
2562 }
2563
2564 strcpy(fw_file, func);
2565
2566 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2567 if (status)
2568 goto fw_exit;
2569
2570 p = fw->data;
2571 fhdr = (struct flash_file_hdr_g2 *) p;
2572 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2573
2574 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2575 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2576 &flash_cmd.dma, GFP_KERNEL);
2577 if (!flash_cmd.va) {
2578 status = -ENOMEM;
2579 dev_err(&adapter->pdev->dev,
2580 "Memory allocation failure while flashing\n");
2581 goto fw_exit;
2582 }
2583
2584 if ((adapter->generation == BE_GEN3) &&
2585 (get_ufigen_type(fhdr) == BE_GEN3)) {
2586 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2587 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2588 for (i = 0; i < num_imgs; i++) {
2589 img_hdr_ptr = (struct image_hdr *) (fw->data +
2590 (sizeof(struct flash_file_hdr_g3) +
2591 i * sizeof(struct image_hdr)));
2592 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2593 status = be_flash_data(adapter, fw, &flash_cmd,
2594 num_imgs);
2595 }
2596 } else if ((adapter->generation == BE_GEN2) &&
2597 (get_ufigen_type(fhdr) == BE_GEN2)) {
2598 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2599 } else {
2600 dev_err(&adapter->pdev->dev,
2601 "UFI and Interface are not compatible for flashing\n");
2602 status = -1;
2603 }
2604
2605 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2606 flash_cmd.dma);
2607 if (status) {
2608 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2609 goto fw_exit;
2610 }
2611
2612 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2613
2614 fw_exit:
2615 release_firmware(fw);
2616 return status;
2617 }
2618
2619 static struct net_device_ops be_netdev_ops = {
2620 .ndo_open = be_open,
2621 .ndo_stop = be_close,
2622 .ndo_start_xmit = be_xmit,
2623 .ndo_set_rx_mode = be_set_multicast_list,
2624 .ndo_set_mac_address = be_mac_addr_set,
2625 .ndo_change_mtu = be_change_mtu,
2626 .ndo_validate_addr = eth_validate_addr,
2627 .ndo_vlan_rx_register = be_vlan_register,
2628 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2629 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2630 .ndo_set_vf_mac = be_set_vf_mac,
2631 .ndo_set_vf_vlan = be_set_vf_vlan,
2632 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2633 .ndo_get_vf_config = be_get_vf_config
2634 };
2635
2636 static void be_netdev_init(struct net_device *netdev)
2637 {
2638 struct be_adapter *adapter = netdev_priv(netdev);
2639 struct be_rx_obj *rxo;
2640 int i;
2641
2642 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2643 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2644 NETIF_F_HW_VLAN_TX;
2645 if (be_multi_rxq(adapter))
2646 netdev->hw_features |= NETIF_F_RXHASH;
2647
2648 netdev->features |= netdev->hw_features |
2649 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2650
2651 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2652 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2653
2654 if (lancer_chip(adapter))
2655 netdev->vlan_features |= NETIF_F_TSO6;
2656
2657 netdev->flags |= IFF_MULTICAST;
2658
2659 /* Default settings for Rx and Tx flow control */
2660 adapter->rx_fc = true;
2661 adapter->tx_fc = true;
2662
2663 netif_set_gso_max_size(netdev, 65535);
2664
2665 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2666
2667 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2668
2669 for_all_rx_queues(adapter, rxo, i)
2670 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2671 BE_NAPI_WEIGHT);
2672
2673 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2674 BE_NAPI_WEIGHT);
2675 }
2676
2677 static void be_unmap_pci_bars(struct be_adapter *adapter)
2678 {
2679 if (adapter->csr)
2680 iounmap(adapter->csr);
2681 if (adapter->db)
2682 iounmap(adapter->db);
2683 if (adapter->pcicfg && be_physfn(adapter))
2684 iounmap(adapter->pcicfg);
2685 }
2686
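/*
 * Map the PCI BARs needed for this function: Lancer uses only BAR 0
 * for doorbells; BE2/BE3 PFs also map the CSR BAR and the PCI config
 * shadow BAR, while VFs derive the config space pointer from an offset
 * within the doorbell BAR.
 */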
2687 static int be_map_pci_bars(struct be_adapter *adapter)
2688 {
2689 u8 __iomem *addr;
2690 int pcicfg_reg, db_reg;
2691
2692 if (lancer_chip(adapter)) {
2693 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2694 pci_resource_len(adapter->pdev, 0));
2695 if (addr == NULL)
2696 return -ENOMEM;
2697 adapter->db = addr;
2698 return 0;
2699 }
2700
2701 if (be_physfn(adapter)) {
2702 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2703 pci_resource_len(adapter->pdev, 2));
2704 if (addr == NULL)
2705 return -ENOMEM;
2706 adapter->csr = addr;
2707 }
2708
2709 if (adapter->generation == BE_GEN2) {
2710 pcicfg_reg = 1;
2711 db_reg = 4;
2712 } else {
2713 pcicfg_reg = 0;
2714 if (be_physfn(adapter))
2715 db_reg = 4;
2716 else
2717 db_reg = 0;
2718 }
2719 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2720 pci_resource_len(adapter->pdev, db_reg));
2721 if (addr == NULL)
2722 goto pci_map_err;
2723 adapter->db = addr;
2724
2725 if (be_physfn(adapter)) {
2726 addr = ioremap_nocache(
2727 pci_resource_start(adapter->pdev, pcicfg_reg),
2728 pci_resource_len(adapter->pdev, pcicfg_reg));
2729 if (addr == NULL)
2730 goto pci_map_err;
2731 adapter->pcicfg = addr;
2732 } else
2733 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2734
2735 return 0;
2736 pci_map_err:
2737 be_unmap_pci_bars(adapter);
2738 return -ENOMEM;
2739 }
2740
2741
2742 static void be_ctrl_cleanup(struct be_adapter *adapter)
2743 {
2744 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2745
2746 be_unmap_pci_bars(adapter);
2747
2748 if (mem->va)
2749 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2750 mem->dma);
2751
2752 mem = &adapter->mc_cmd_mem;
2753 if (mem->va)
2754 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2755 mem->dma);
2756 }
2757
2758 static int be_ctrl_init(struct be_adapter *adapter)
2759 {
2760 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2761 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2762 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2763 int status;
2764
2765 status = be_map_pci_bars(adapter);
2766 if (status)
2767 goto done;
2768
2769 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2770 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2771 mbox_mem_alloc->size,
2772 &mbox_mem_alloc->dma,
2773 GFP_KERNEL);
2774 if (!mbox_mem_alloc->va) {
2775 status = -ENOMEM;
2776 goto unmap_pci_bars;
2777 }
2778
2779 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2780 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2781 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2782 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2783
2784 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2785 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2786 mc_cmd_mem->size, &mc_cmd_mem->dma,
2787 GFP_KERNEL);
2788 if (mc_cmd_mem->va == NULL) {
2789 status = -ENOMEM;
2790 goto free_mbox;
2791 }
2792 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2793
2794 mutex_init(&adapter->mbox_lock);
2795 spin_lock_init(&adapter->mcc_lock);
2796 spin_lock_init(&adapter->mcc_cq_lock);
2797
2798 init_completion(&adapter->flash_compl);
2799 pci_save_state(adapter->pdev);
2800 return 0;
2801
2802 free_mbox:
2803 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2804 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2805
2806 unmap_pci_bars:
2807 be_unmap_pci_bars(adapter);
2808
2809 done:
2810 return status;
2811 }
2812
2813 static void be_stats_cleanup(struct be_adapter *adapter)
2814 {
2815 struct be_dma_mem *cmd = &adapter->stats_cmd;
2816
2817 if (cmd->va)
2818 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2819 cmd->va, cmd->dma);
2820 }
2821
2822 static int be_stats_init(struct be_adapter *adapter)
2823 {
2824 struct be_dma_mem *cmd = &adapter->stats_cmd;
2825
2826 cmd->size = sizeof(struct be_cmd_req_get_stats);
2827 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2828 GFP_KERNEL);
2829 if (cmd->va == NULL)
2830 return -1;
2831 memset(cmd->va, 0, cmd->size);
2832 return 0;
2833 }
2834
2835 static void __devexit be_remove(struct pci_dev *pdev)
2836 {
2837 struct be_adapter *adapter = pci_get_drvdata(pdev);
2838
2839 if (!adapter)
2840 return;
2841
2842 cancel_delayed_work_sync(&adapter->work);
2843
2844 unregister_netdev(adapter->netdev);
2845
2846 be_clear(adapter);
2847
2848 be_stats_cleanup(adapter);
2849
2850 be_ctrl_cleanup(adapter);
2851
2852 kfree(adapter->vf_cfg);
2853 be_sriov_disable(adapter);
2854
2855 be_msix_disable(adapter);
2856
2857 pci_set_drvdata(pdev, NULL);
2858 pci_release_regions(pdev);
2859 pci_disable_device(pdev);
2860
2861 free_netdev(adapter->netdev);
2862 }
2863
2864 static int be_get_config(struct be_adapter *adapter)
2865 {
2866 int status;
2867 u8 mac[ETH_ALEN];
2868
2869 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2870 if (status)
2871 return status;
2872
2873 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2874 &adapter->function_mode, &adapter->function_caps);
2875 if (status)
2876 return status;
2877
2878 memset(mac, 0, ETH_ALEN);
2879
2880 if (be_physfn(adapter)) {
2881 status = be_cmd_mac_addr_query(adapter, mac,
2882 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2883
2884 if (status)
2885 return status;
2886
2887 if (!is_valid_ether_addr(mac))
2888 return -EADDRNOTAVAIL;
2889
2890 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2891 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2892 }
2893
2894 if (adapter->function_mode & 0x400)
2895 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2896 else
2897 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2898
2899 status = be_cmd_get_cntl_attributes(adapter);
2900 if (status)
2901 return status;
2902
2903 be_cmd_check_native_mode(adapter);
2904 return 0;
2905 }
2906
2907 static int be_dev_family_check(struct be_adapter *adapter)
2908 {
2909 struct pci_dev *pdev = adapter->pdev;
2910 u32 sli_intf = 0, if_type;
2911
2912 switch (pdev->device) {
2913 case BE_DEVICE_ID1:
2914 case OC_DEVICE_ID1:
2915 adapter->generation = BE_GEN2;
2916 break;
2917 case BE_DEVICE_ID2:
2918 case OC_DEVICE_ID2:
2919 adapter->generation = BE_GEN3;
2920 break;
2921 case OC_DEVICE_ID3:
2922 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2923 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2924 SLI_INTF_IF_TYPE_SHIFT;
2925
2926 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2927 if_type != 0x02) {
2928 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2929 return -EINVAL;
2930 }
2931 if (num_vfs > 0) {
2932 dev_err(&pdev->dev, "VFs not supported\n");
2933 return -EINVAL;
2934 }
2935 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2936 SLI_INTF_FAMILY_SHIFT);
2937 adapter->generation = BE_GEN3;
2938 break;
2939 default:
2940 adapter->generation = 0;
2941 }
2942 return 0;
2943 }
2944
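/* Poll SLIPORT_STATUS until the ready bit is set (roughly 10 seconds
 * of 20ms polls).
 */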
2945 static int lancer_wait_ready(struct be_adapter *adapter)
2946 {
2947 #define SLIPORT_READY_TIMEOUT 500
2948 u32 sliport_status;
2949 int status = 0, i;
2950
2951 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2952 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2953 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2954 break;
2955
2956 msleep(20);
2957 }
2958
2959 if (i == SLIPORT_READY_TIMEOUT)
2960 status = -1;
2961
2962 return status;
2963 }
2964
2965 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2966 {
2967 int status;
2968 u32 sliport_status, err, reset_needed;
2969 status = lancer_wait_ready(adapter);
2970 if (!status) {
2971 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2972 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2973 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2974 if (err && reset_needed) {
2975 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2976 adapter->db + SLIPORT_CONTROL_OFFSET);
2977
2978 			/* check if the adapter has corrected the error */
2979 status = lancer_wait_ready(adapter);
2980 sliport_status = ioread32(adapter->db +
2981 SLIPORT_STATUS_OFFSET);
2982 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2983 SLIPORT_STATUS_RN_MASK);
2984 if (status || sliport_status)
2985 status = -1;
2986 } else if (err || reset_needed) {
2987 status = -1;
2988 }
2989 }
2990 return status;
2991 }
2992
2993 static int __devinit be_probe(struct pci_dev *pdev,
2994 const struct pci_device_id *pdev_id)
2995 {
2996 int status = 0;
2997 struct be_adapter *adapter;
2998 struct net_device *netdev;
2999
3000 status = pci_enable_device(pdev);
3001 if (status)
3002 goto do_none;
3003
3004 status = pci_request_regions(pdev, DRV_NAME);
3005 if (status)
3006 goto disable_dev;
3007 pci_set_master(pdev);
3008
3009 netdev = alloc_etherdev(sizeof(struct be_adapter));
3010 if (netdev == NULL) {
3011 status = -ENOMEM;
3012 goto rel_reg;
3013 }
3014 adapter = netdev_priv(netdev);
3015 adapter->pdev = pdev;
3016 pci_set_drvdata(pdev, adapter);
3017
3018 status = be_dev_family_check(adapter);
3019 if (status)
3020 goto free_netdev;
3021
3022 adapter->netdev = netdev;
3023 SET_NETDEV_DEV(netdev, &pdev->dev);
3024
3025 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3026 if (!status) {
3027 netdev->features |= NETIF_F_HIGHDMA;
3028 } else {
3029 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3030 if (status) {
3031 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3032 goto free_netdev;
3033 }
3034 }
3035
3036 be_sriov_enable(adapter);
3037 if (adapter->sriov_enabled) {
3038 adapter->vf_cfg = kcalloc(num_vfs,
3039 sizeof(struct be_vf_cfg), GFP_KERNEL);
3040
3041 if (!adapter->vf_cfg)
3042 goto free_netdev;
3043 }
3044
3045 status = be_ctrl_init(adapter);
3046 if (status)
3047 goto free_vf_cfg;
3048
3049 if (lancer_chip(adapter)) {
3050 status = lancer_test_and_set_rdy_state(adapter);
3051 if (status) {
3052 			dev_err(&pdev->dev, "Adapter in non-recoverable error state\n");
3053 goto ctrl_clean;
3054 }
3055 }
3056
3057 /* sync up with fw's ready state */
3058 if (be_physfn(adapter)) {
3059 status = be_cmd_POST(adapter);
3060 if (status)
3061 goto ctrl_clean;
3062 }
3063
3064 /* tell fw we're ready to fire cmds */
3065 status = be_cmd_fw_init(adapter);
3066 if (status)
3067 goto ctrl_clean;
3068
3069 status = be_cmd_reset_function(adapter);
3070 if (status)
3071 goto ctrl_clean;
3072
3073 status = be_stats_init(adapter);
3074 if (status)
3075 goto ctrl_clean;
3076
3077 status = be_get_config(adapter);
3078 if (status)
3079 goto stats_clean;
3080
3081 be_msix_enable(adapter);
3082
3083 INIT_DELAYED_WORK(&adapter->work, be_worker);
3084
3085 status = be_setup(adapter);
3086 if (status)
3087 goto msix_disable;
3088
3089 be_netdev_init(netdev);
3090 status = register_netdev(netdev);
3091 if (status != 0)
3092 goto unsetup;
3093 netif_carrier_off(netdev);
3094
3095 if (be_physfn(adapter) && adapter->sriov_enabled) {
3096 u8 mac_speed;
3097 bool link_up;
3098 u16 vf, lnk_speed;
3099
3100 status = be_vf_eth_addr_config(adapter);
3101 if (status)
3102 goto unreg_netdev;
3103
3104 for (vf = 0; vf < num_vfs; vf++) {
3105 status = be_cmd_link_status_query(adapter, &link_up,
3106 &mac_speed, &lnk_speed, vf + 1);
3107 if (!status)
3108 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3109 else
3110 goto unreg_netdev;
3111 }
3112 }
3113
3114 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3115 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3116 return 0;
3117
3118 unreg_netdev:
3119 unregister_netdev(netdev);
3120 unsetup:
3121 be_clear(adapter);
3122 msix_disable:
3123 be_msix_disable(adapter);
3124 stats_clean:
3125 be_stats_cleanup(adapter);
3126 ctrl_clean:
3127 be_ctrl_cleanup(adapter);
3128 free_vf_cfg:
3129 kfree(adapter->vf_cfg);
3130 free_netdev:
3131 be_sriov_disable(adapter);
3132 free_netdev(netdev);
3133 pci_set_drvdata(pdev, NULL);
3134 rel_reg:
3135 pci_release_regions(pdev);
3136 disable_dev:
3137 pci_disable_device(pdev);
3138 do_none:
3139 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3140 return status;
3141 }
3142
3143 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3144 {
3145 struct be_adapter *adapter = pci_get_drvdata(pdev);
3146 struct net_device *netdev = adapter->netdev;
3147
3148 cancel_delayed_work_sync(&adapter->work);
3149 if (adapter->wol)
3150 be_setup_wol(adapter, true);
3151
3152 netif_device_detach(netdev);
3153 if (netif_running(netdev)) {
3154 rtnl_lock();
3155 be_close(netdev);
3156 rtnl_unlock();
3157 }
3158 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3159 be_clear(adapter);
3160
3161 be_msix_disable(adapter);
3162 pci_save_state(pdev);
3163 pci_disable_device(pdev);
3164 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3165 return 0;
3166 }
3167
3168 static int be_resume(struct pci_dev *pdev)
3169 {
3170 int status = 0;
3171 struct be_adapter *adapter = pci_get_drvdata(pdev);
3172 struct net_device *netdev = adapter->netdev;
3173
3174 netif_device_detach(netdev);
3175
3176 status = pci_enable_device(pdev);
3177 if (status)
3178 return status;
3179
3180 pci_set_power_state(pdev, 0);
3181 pci_restore_state(pdev);
3182
3183 be_msix_enable(adapter);
3184 /* tell fw we're ready to fire cmds */
3185 status = be_cmd_fw_init(adapter);
3186 if (status)
3187 return status;
3188
3189 be_setup(adapter);
3190 if (netif_running(netdev)) {
3191 rtnl_lock();
3192 be_open(netdev);
3193 rtnl_unlock();
3194 }
3195 netif_device_attach(netdev);
3196
3197 if (adapter->wol)
3198 be_setup_wol(adapter, false);
3199
3200 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3201 return 0;
3202 }
3203
3204 /*
3205 * An FLR will stop BE from DMAing any data.
3206 */
3207 static void be_shutdown(struct pci_dev *pdev)
3208 {
3209 struct be_adapter *adapter = pci_get_drvdata(pdev);
3210
3211 if (!adapter)
3212 return;
3213
3214 cancel_delayed_work_sync(&adapter->work);
3215
3216 netif_device_detach(adapter->netdev);
3217
3218 if (adapter->wol)
3219 be_setup_wol(adapter, true);
3220
3221 be_cmd_reset_function(adapter);
3222
3223 pci_disable_device(pdev);
3224 }
3225
3226 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3227 pci_channel_state_t state)
3228 {
3229 struct be_adapter *adapter = pci_get_drvdata(pdev);
3230 struct net_device *netdev = adapter->netdev;
3231
3232 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3233
3234 adapter->eeh_err = true;
3235
3236 netif_device_detach(netdev);
3237
3238 if (netif_running(netdev)) {
3239 rtnl_lock();
3240 be_close(netdev);
3241 rtnl_unlock();
3242 }
3243 be_clear(adapter);
3244
3245 if (state == pci_channel_io_perm_failure)
3246 return PCI_ERS_RESULT_DISCONNECT;
3247
3248 pci_disable_device(pdev);
3249
3250 return PCI_ERS_RESULT_NEED_RESET;
3251 }
3252
3253 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3254 {
3255 struct be_adapter *adapter = pci_get_drvdata(pdev);
3256 int status;
3257
3258 dev_info(&adapter->pdev->dev, "EEH reset\n");
3259 adapter->eeh_err = false;
3260
3261 status = pci_enable_device(pdev);
3262 if (status)
3263 return PCI_ERS_RESULT_DISCONNECT;
3264
3265 pci_set_master(pdev);
3266 pci_set_power_state(pdev, 0);
3267 pci_restore_state(pdev);
3268
3269 /* Check if card is ok and fw is ready */
3270 status = be_cmd_POST(adapter);
3271 if (status)
3272 return PCI_ERS_RESULT_DISCONNECT;
3273
3274 return PCI_ERS_RESULT_RECOVERED;
3275 }
3276
3277 static void be_eeh_resume(struct pci_dev *pdev)
3278 {
3279 int status = 0;
3280 struct be_adapter *adapter = pci_get_drvdata(pdev);
3281 struct net_device *netdev = adapter->netdev;
3282
3283 dev_info(&adapter->pdev->dev, "EEH resume\n");
3284
3285 pci_save_state(pdev);
3286
3287 /* tell fw we're ready to fire cmds */
3288 status = be_cmd_fw_init(adapter);
3289 if (status)
3290 goto err;
3291
3292 status = be_setup(adapter);
3293 if (status)
3294 goto err;
3295
3296 if (netif_running(netdev)) {
3297 status = be_open(netdev);
3298 if (status)
3299 goto err;
3300 }
3301 netif_device_attach(netdev);
3302 return;
3303 err:
3304 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3305 }
3306
3307 static struct pci_error_handlers be_eeh_handlers = {
3308 .error_detected = be_eeh_err_detected,
3309 .slot_reset = be_eeh_reset,
3310 .resume = be_eeh_resume,
3311 };
3312
3313 static struct pci_driver be_driver = {
3314 .name = DRV_NAME,
3315 .id_table = be_dev_ids,
3316 .probe = be_probe,
3317 .remove = be_remove,
3318 .suspend = be_suspend,
3319 .resume = be_resume,
3320 .shutdown = be_shutdown,
3321 .err_handler = &be_eeh_handlers
3322 };
3323
3324 static int __init be_init_module(void)
3325 {
3326 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3327 rx_frag_size != 2048) {
3328 printk(KERN_WARNING DRV_NAME
3329 " : Module param rx_frag_size must be 2048/4096/8192."
3330 " Using 2048\n");
3331 rx_frag_size = 2048;
3332 }
3333
3334 return pci_register_driver(&be_driver);
3335 }
3336 module_init(be_init_module);
3337
3338 static void __exit be_exit_module(void)
3339 {
3340 pci_unregister_driver(&be_driver);
3341 }
3342 module_exit(be_exit_module);