be2net: Support for version 1 of stats for BE3
drivers/net/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
23 MODULE_DEVICE_TABLE(pci, be_dev_ids);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static ushort rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, ushort, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { 0 }
46 };
47 MODULE_DEVICE_TABLE(pci, be_dev_ids);
48 /* UE Status Low CSR */
49 static char *ue_status_low_desc[] = {
50 "CEV",
51 "CTX",
52 "DBUF",
53 "ERX",
54 "Host",
55 "MPU",
56 "NDMA",
57 "PTC ",
58 "RDMA ",
59 "RXF ",
60 "RXIPS ",
61 "RXULP0 ",
62 "RXULP1 ",
63 "RXULP2 ",
64 "TIM ",
65 "TPOST ",
66 "TPRE ",
67 "TXIPS ",
68 "TXULP0 ",
69 "TXULP1 ",
70 "UC ",
71 "WDMA ",
72 "TXULP2 ",
73 "HOST1 ",
74 "P0_OB_LINK ",
75 "P1_OB_LINK ",
76 "HOST_GPIO ",
77 "MBOX ",
78 "AXGMAC0",
79 "AXGMAC1",
80 "JTAG",
81 "MPU_INTPEND"
82 };
83 /* UE Status High CSR */
84 static char *ue_status_hi_desc[] = {
85 "LPCMEMHOST",
86 "MGMT_MAC",
87 "PCS0ONLINE",
88 "MPU_IRAM",
89 "PCS1ONLINE",
90 "PCTL0",
91 "PCTL1",
92 "PMEM",
93 "RR",
94 "TXPB",
95 "RXPP",
96 "XAUI",
97 "TXP",
98 "ARM",
99 "IPC",
100 "HOST2",
101 "HOST3",
102 "HOST4",
103 "HOST5",
104 "HOST6",
105 "HOST7",
106 "HOST8",
107 "HOST9",
108 "NETC"
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown"
117 };
118
119 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
120 {
121 struct be_dma_mem *mem = &q->dma_mem;
122 if (mem->va)
123 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
124 mem->dma);
125 }
126
127 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
128 u16 len, u16 entry_size)
129 {
130 struct be_dma_mem *mem = &q->dma_mem;
131
132 memset(q, 0, sizeof(*q));
133 q->len = len;
134 q->entry_size = entry_size;
135 mem->size = len * entry_size;
136 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
137 GFP_KERNEL);
138 if (!mem->va)
139 return -1;
140 memset(mem->va, 0, mem->size);
141 return 0;
142 }
143
144 static void be_intr_set(struct be_adapter *adapter, bool enable)
145 {
146 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
147 u32 reg = ioread32(addr);
148 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
149
150 if (adapter->eeh_err)
151 return;
152
153 if (!enabled && enable)
154 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 else if (enabled && !enable)
156 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
157 else
158 return;
159
160 iowrite32(reg, addr);
161 }
162
163 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
164 {
165 u32 val = 0;
166 val |= qid & DB_RQ_RING_ID_MASK;
167 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
168
169 wmb();
170 iowrite32(val, adapter->db + DB_RQ_OFFSET);
171 }
172
173 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
174 {
175 u32 val = 0;
176 val |= qid & DB_TXULP_RING_ID_MASK;
177 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
178
179 wmb();
180 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
181 }
182
183 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
184 bool arm, bool clear_int, u16 num_popped)
185 {
186 u32 val = 0;
187 val |= qid & DB_EQ_RING_ID_MASK;
188 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
189 DB_EQ_RING_ID_EXT_MASK_SHIFT);
190
191 if (adapter->eeh_err)
192 return;
193
194 if (arm)
195 val |= 1 << DB_EQ_REARM_SHIFT;
196 if (clear_int)
197 val |= 1 << DB_EQ_CLR_SHIFT;
198 val |= 1 << DB_EQ_EVNT_SHIFT;
199 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
200 iowrite32(val, adapter->db + DB_EQ_OFFSET);
201 }
202
203 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
204 {
205 u32 val = 0;
206 val |= qid & DB_CQ_RING_ID_MASK;
207 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
208 DB_CQ_RING_ID_EXT_MASK_SHIFT);
209
210 if (adapter->eeh_err)
211 return;
212
213 if (arm)
214 val |= 1 << DB_CQ_REARM_SHIFT;
215 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
216 iowrite32(val, adapter->db + DB_CQ_OFFSET);
217 }
218
219 static int be_mac_addr_set(struct net_device *netdev, void *p)
220 {
221 struct be_adapter *adapter = netdev_priv(netdev);
222 struct sockaddr *addr = p;
223 int status = 0;
224
225 if (!is_valid_ether_addr(addr->sa_data))
226 return -EADDRNOTAVAIL;
227
228 /* MAC addr configuration will be done in hardware for VFs
229 * by their corresponding PFs. Just copy to netdev addr here
230 */
231 if (!be_physfn(adapter))
232 goto netdev_addr;
233
234 status = be_cmd_pmac_del(adapter, adapter->if_handle,
235 adapter->pmac_id, 0);
236 if (status)
237 return status;
238
239 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
240 adapter->if_handle, &adapter->pmac_id, 0);
241 netdev_addr:
242 if (!status)
243 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
244
245 return status;
246 }
247
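/* Fill adapter->drv_stats from the v0 (legacy/BE2) h/w stats layout */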
248 static void populate_be2_stats(struct be_adapter *adapter)
249 {
250
251 struct be_drv_stats *drvs = &adapter->drv_stats;
252 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
253 struct be_port_rxf_stats_v0 *port_stats =
254 be_port_rxf_stats_from_cmd(adapter);
255 struct be_rxf_stats_v0 *rxf_stats =
256 be_rxf_stats_from_cmd(adapter);
257
258 drvs->rx_pause_frames = port_stats->rx_pause_frames;
259 drvs->rx_crc_errors = port_stats->rx_crc_errors;
260 drvs->rx_control_frames = port_stats->rx_control_frames;
261 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
262 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
263 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
264 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
265 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
266 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
267 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
268 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
269 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
270 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
271 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
272 drvs->rx_input_fifo_overflow_drop =
273 port_stats->rx_input_fifo_overflow;
274 drvs->rx_dropped_header_too_small =
275 port_stats->rx_dropped_header_too_small;
276 drvs->rx_address_match_errors =
277 port_stats->rx_address_match_errors;
278 drvs->rx_alignment_symbol_errors =
279 port_stats->rx_alignment_symbol_errors;
280
281 drvs->tx_pauseframes = port_stats->tx_pauseframes;
282 drvs->tx_controlframes = port_stats->tx_controlframes;
283
284 if (adapter->port_num)
285 drvs->jabber_events =
286 rxf_stats->port1_jabber_events;
287 else
288 drvs->jabber_events =
289 rxf_stats->port0_jabber_events;
290 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
291 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
292 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
293 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
294 drvs->forwarded_packets = rxf_stats->forwarded_packets;
295 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
296 drvs->rx_drops_no_tpre_descr =
297 rxf_stats->rx_drops_no_tpre_descr;
298 drvs->rx_drops_too_many_frags =
299 rxf_stats->rx_drops_too_many_frags;
300 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
301 }
302
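/* Fill adapter->drv_stats from the v1 stats layout returned by BE3 */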
303 static void populate_be3_stats(struct be_adapter *adapter)
304 {
305 struct be_drv_stats *drvs = &adapter->drv_stats;
306 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
307
308 struct be_rxf_stats_v1 *rxf_stats =
309 be_rxf_stats_from_cmd(adapter);
310 struct be_port_rxf_stats_v1 *port_stats =
311 be_port_rxf_stats_from_cmd(adapter);
312
313 drvs->rx_priority_pause_frames = 0;
314 drvs->pmem_fifo_overflow_drop = 0;
315 drvs->rx_pause_frames = port_stats->rx_pause_frames;
316 drvs->rx_crc_errors = port_stats->rx_crc_errors;
317 drvs->rx_control_frames = port_stats->rx_control_frames;
318 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
319 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
320 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
321 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
322 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
323 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
324 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
325 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
326 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
327 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
328 drvs->rx_dropped_header_too_small =
329 port_stats->rx_dropped_header_too_small;
330 drvs->rx_input_fifo_overflow_drop =
331 port_stats->rx_input_fifo_overflow_drop;
332 drvs->rx_address_match_errors =
333 port_stats->rx_address_match_errors;
334 drvs->rx_alignment_symbol_errors =
335 port_stats->rx_alignment_symbol_errors;
336 drvs->rxpp_fifo_overflow_drop =
337 port_stats->rxpp_fifo_overflow_drop;
338 drvs->tx_pauseframes = port_stats->tx_pauseframes;
339 drvs->tx_controlframes = port_stats->tx_controlframes;
340 drvs->jabber_events = port_stats->jabber_events;
341 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
342 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
343 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
344 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
345 drvs->forwarded_packets = rxf_stats->forwarded_packets;
346 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
347 drvs->rx_drops_no_tpre_descr =
348 rxf_stats->rx_drops_no_tpre_descr;
349 drvs->rx_drops_too_many_frags =
350 rxf_stats->rx_drops_too_many_frags;
351 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
352 }
353
354
355
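/* Parse the f/w stats reply using the layout that matches the chip generation */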
356 void be_parse_stats(struct be_adapter *adapter)
357 {
358 if (adapter->generation == BE_GEN3)
359 populate_be3_stats(adapter);
360 else
361 populate_be2_stats(adapter);
362 }
363
364 void netdev_stats_update(struct be_adapter *adapter)
365 {
366 struct be_drv_stats *drvs = &adapter->drv_stats;
367 struct net_device_stats *dev_stats = &adapter->netdev->stats;
368 struct be_rx_obj *rxo;
369 int i;
370
371 memset(dev_stats, 0, sizeof(*dev_stats));
372 for_all_rx_queues(adapter, rxo, i) {
373 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
374 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
375 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
376 /* no space in linux buffers: best possible approximation */
377 if (adapter->generation == BE_GEN3) {
378 struct be_erx_stats_v1 *erx_stats =
379 be_erx_stats_from_cmd(adapter);
380 dev_stats->rx_dropped +=
381 erx_stats->rx_drops_no_fragments[rxo->q.id];
382 } else {
383 struct be_erx_stats_v0 *erx_stats =
384 be_erx_stats_from_cmd(adapter);
385 dev_stats->rx_dropped +=
386 erx_stats->rx_drops_no_fragments[rxo->q.id];
387 }
388 }
389
390 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
391 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
392
393 /* bad pkts received */
394 dev_stats->rx_errors = drvs->rx_crc_errors +
395 drvs->rx_alignment_symbol_errors +
396 drvs->rx_in_range_errors +
397 drvs->rx_out_range_errors +
398 drvs->rx_frame_too_long +
399 drvs->rx_dropped_too_small +
400 drvs->rx_dropped_too_short +
401 drvs->rx_dropped_header_too_small +
402 drvs->rx_dropped_tcp_length +
403 drvs->rx_dropped_runt +
404 drvs->rx_tcp_checksum_errs +
405 drvs->rx_ip_checksum_errs +
406 drvs->rx_udp_checksum_errs;
407
408 /* detailed rx errors */
409 dev_stats->rx_length_errors = drvs->rx_in_range_errors +
410 drvs->rx_out_range_errors +
411 drvs->rx_frame_too_long;
412
413 dev_stats->rx_crc_errors = drvs->rx_crc_errors;
414
415 /* frame alignment errors */
416 dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
417
418 /* receiver fifo overrun */
419 /* drops_no_pbuf is not per i/f, it's per BE card */
420 dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
421 drvs->rx_input_fifo_overflow_drop +
422 drvs->rx_drops_no_pbuf;
423 }
424
425 void be_link_status_update(struct be_adapter *adapter, bool link_up)
426 {
427 struct net_device *netdev = adapter->netdev;
428
429 /* If link came up or went down */
430 if (adapter->link_up != link_up) {
431 adapter->link_speed = -1;
432 if (link_up) {
433 netif_carrier_on(netdev);
434 printk(KERN_INFO "%s: Link up\n", netdev->name);
435 } else {
436 netif_carrier_off(netdev);
437 printk(KERN_INFO "%s: Link down\n", netdev->name);
438 }
439 adapter->link_up = link_up;
440 }
441 }
442
443 /* Update the EQ delay in BE based on the RX frags consumed / sec */
444 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
445 {
446 struct be_eq_obj *rx_eq = &rxo->rx_eq;
447 struct be_rx_stats *stats = &rxo->stats;
448 ulong now = jiffies;
449 u32 eqd;
450
451 if (!rx_eq->enable_aic)
452 return;
453
454 /* Wrapped around */
455 if (time_before(now, stats->rx_fps_jiffies)) {
456 stats->rx_fps_jiffies = now;
457 return;
458 }
459
460 /* Update once a second */
461 if ((now - stats->rx_fps_jiffies) < HZ)
462 return;
463
464 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
465 ((now - stats->rx_fps_jiffies) / HZ);
466
467 stats->rx_fps_jiffies = now;
468 stats->prev_rx_frags = stats->rx_frags;
469 eqd = stats->rx_fps / 110000;
470 eqd = eqd << 3;
471 if (eqd > rx_eq->max_eqd)
472 eqd = rx_eq->max_eqd;
473 if (eqd < rx_eq->min_eqd)
474 eqd = rx_eq->min_eqd;
475 if (eqd < 10)
476 eqd = 0;
477 if (eqd != rx_eq->cur_eqd)
478 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
479
480 rx_eq->cur_eqd = eqd;
481 }
482
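/* Convert a byte count accumulated over 'ticks' jiffies into an approximate rate in Mbits/sec */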
483 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
484 {
485 u64 rate = bytes;
486
487 do_div(rate, ticks / HZ);
488 rate <<= 3; /* bytes/sec -> bits/sec */
489 do_div(rate, 1000000ul); /* Mbits/sec */
490
491 return rate;
492 }
493
494 static void be_tx_rate_update(struct be_adapter *adapter)
495 {
496 struct be_tx_stats *stats = tx_stats(adapter);
497 ulong now = jiffies;
498
499 /* Wrapped around? */
500 if (time_before(now, stats->be_tx_jiffies)) {
501 stats->be_tx_jiffies = now;
502 return;
503 }
504
505 /* Update tx rate once in two seconds */
506 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
507 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
508 - stats->be_tx_bytes_prev,
509 now - stats->be_tx_jiffies);
510 stats->be_tx_jiffies = now;
511 stats->be_tx_bytes_prev = stats->be_tx_bytes;
512 }
513 }
514
515 static void be_tx_stats_update(struct be_adapter *adapter,
516 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
517 {
518 struct be_tx_stats *stats = tx_stats(adapter);
519 stats->be_tx_reqs++;
520 stats->be_tx_wrbs += wrb_cnt;
521 stats->be_tx_bytes += copied;
522 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
523 if (stopped)
524 stats->be_tx_stops++;
525 }
526
527 /* Determine number of WRB entries needed to xmit data in an skb */
528 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
529 bool *dummy)
530 {
531 int cnt = (skb->len > skb->data_len);
532
533 cnt += skb_shinfo(skb)->nr_frags;
534
535 /* to account for hdr wrb */
536 cnt++;
537 if (lancer_chip(adapter) || !(cnt & 1)) {
538 *dummy = false;
539 } else {
540 /* add a dummy to make it an even num */
541 cnt++;
542 *dummy = true;
543 }
544 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
545 return cnt;
546 }
547
548 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
549 {
550 wrb->frag_pa_hi = upper_32_bits(addr);
551 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
552 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
553 }
554
555 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
556 struct sk_buff *skb, u32 wrb_cnt, u32 len)
557 {
558 u8 vlan_prio = 0;
559 u16 vlan_tag = 0;
560
561 memset(hdr, 0, sizeof(*hdr));
562
563 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
564
565 if (skb_is_gso(skb)) {
566 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
567 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
568 hdr, skb_shinfo(skb)->gso_size);
569 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
570 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
571 if (lancer_chip(adapter) && adapter->sli_family ==
572 LANCER_A0_SLI_FAMILY) {
573 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
574 if (is_tcp_pkt(skb))
575 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
576 tcpcs, hdr, 1);
577 else if (is_udp_pkt(skb))
578 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
579 udpcs, hdr, 1);
580 }
581 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
582 if (is_tcp_pkt(skb))
583 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
584 else if (is_udp_pkt(skb))
585 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
586 }
587
588 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
589 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
590 vlan_tag = vlan_tx_tag_get(skb);
591 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
592 /* If vlan priority provided by OS is NOT in available bmap */
593 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
594 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
595 adapter->recommended_prio;
596 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
597 }
598
599 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
600 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
601 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
602 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
603 }
604
605 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
606 bool unmap_single)
607 {
608 dma_addr_t dma;
609
610 be_dws_le_to_cpu(wrb, sizeof(*wrb));
611
612 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
613 if (wrb->frag_len) {
614 if (unmap_single)
615 dma_unmap_single(dev, dma, wrb->frag_len,
616 DMA_TO_DEVICE);
617 else
618 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
619 }
620 }
621
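/* DMA-map the skb header and fragments and fill the header wrb plus one wrb
 * per mapping (and an optional dummy wrb). Returns the number of bytes
 * queued, or 0 on a mapping error.
 */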
622 static int make_tx_wrbs(struct be_adapter *adapter,
623 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
624 {
625 dma_addr_t busaddr;
626 int i, copied = 0;
627 struct device *dev = &adapter->pdev->dev;
628 struct sk_buff *first_skb = skb;
629 struct be_queue_info *txq = &adapter->tx_obj.q;
630 struct be_eth_wrb *wrb;
631 struct be_eth_hdr_wrb *hdr;
632 bool map_single = false;
633 u16 map_head;
634
635 hdr = queue_head_node(txq);
636 queue_head_inc(txq);
637 map_head = txq->head;
638
639 if (skb->len > skb->data_len) {
640 int len = skb_headlen(skb);
641 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
642 if (dma_mapping_error(dev, busaddr))
643 goto dma_err;
644 map_single = true;
645 wrb = queue_head_node(txq);
646 wrb_fill(wrb, busaddr, len);
647 be_dws_cpu_to_le(wrb, sizeof(*wrb));
648 queue_head_inc(txq);
649 copied += len;
650 }
651
652 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
653 struct skb_frag_struct *frag =
654 &skb_shinfo(skb)->frags[i];
655 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
656 frag->size, DMA_TO_DEVICE);
657 if (dma_mapping_error(dev, busaddr))
658 goto dma_err;
659 wrb = queue_head_node(txq);
660 wrb_fill(wrb, busaddr, frag->size);
661 be_dws_cpu_to_le(wrb, sizeof(*wrb));
662 queue_head_inc(txq);
663 copied += frag->size;
664 }
665
666 if (dummy_wrb) {
667 wrb = queue_head_node(txq);
668 wrb_fill(wrb, 0, 0);
669 be_dws_cpu_to_le(wrb, sizeof(*wrb));
670 queue_head_inc(txq);
671 }
672
673 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
674 be_dws_cpu_to_le(hdr, sizeof(*hdr));
675
676 return copied;
677 dma_err:
678 txq->head = map_head;
679 while (copied) {
680 wrb = queue_head_node(txq);
681 unmap_tx_frag(dev, wrb, map_single);
682 map_single = false;
683 copied -= wrb->frag_len;
684 queue_head_inc(txq);
685 }
686 return 0;
687 }
688
689 static netdev_tx_t be_xmit(struct sk_buff *skb,
690 struct net_device *netdev)
691 {
692 struct be_adapter *adapter = netdev_priv(netdev);
693 struct be_tx_obj *tx_obj = &adapter->tx_obj;
694 struct be_queue_info *txq = &tx_obj->q;
695 u32 wrb_cnt = 0, copied = 0;
696 u32 start = txq->head;
697 bool dummy_wrb, stopped = false;
698
699 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
700
701 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
702 if (copied) {
703 /* record the sent skb in the sent_skb table */
704 BUG_ON(tx_obj->sent_skb_list[start]);
705 tx_obj->sent_skb_list[start] = skb;
706
707 /* Ensure txq has space for the next skb; Else stop the queue
709 * *BEFORE* ringing the tx doorbell, so that we serialize the
709 * tx compls of the current transmit which'll wake up the queue
710 */
711 atomic_add(wrb_cnt, &txq->used);
712 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
713 txq->len) {
714 netif_stop_queue(netdev);
715 stopped = true;
716 }
717
718 be_txq_notify(adapter, txq->id, wrb_cnt);
719
720 be_tx_stats_update(adapter, wrb_cnt, copied,
721 skb_shinfo(skb)->gso_segs, stopped);
722 } else {
723 txq->head = start;
724 dev_kfree_skb_any(skb);
725 }
726 return NETDEV_TX_OK;
727 }
728
729 static int be_change_mtu(struct net_device *netdev, int new_mtu)
730 {
731 struct be_adapter *adapter = netdev_priv(netdev);
732 if (new_mtu < BE_MIN_MTU ||
733 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
734 (ETH_HLEN + ETH_FCS_LEN))) {
735 dev_info(&adapter->pdev->dev,
736 "MTU must be between %d and %d bytes\n",
737 BE_MIN_MTU,
738 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
739 return -EINVAL;
740 }
741 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
742 netdev->mtu, new_mtu);
743 netdev->mtu = new_mtu;
744 return 0;
745 }
746
747 /*
748 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
749 * If the user configures more, place BE in vlan promiscuous mode.
750 */
751 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
752 {
753 u16 vtag[BE_NUM_VLANS_SUPPORTED];
754 u16 ntags = 0, i;
755 int status = 0;
756 u32 if_handle;
757
758 if (vf) {
759 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
760 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
761 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
762 }
763
764 if (adapter->vlans_added <= adapter->max_vlans) {
765 /* Construct VLAN Table to give to HW */
766 for (i = 0; i < VLAN_N_VID; i++) {
767 if (adapter->vlan_tag[i]) {
768 vtag[ntags] = cpu_to_le16(i);
769 ntags++;
770 }
771 }
772 status = be_cmd_vlan_config(adapter, adapter->if_handle,
773 vtag, ntags, 1, 0);
774 } else {
775 status = be_cmd_vlan_config(adapter, adapter->if_handle,
776 NULL, 0, 1, 1);
777 }
778
779 return status;
780 }
781
782 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
783 {
784 struct be_adapter *adapter = netdev_priv(netdev);
785
786 adapter->vlan_grp = grp;
787 }
788
789 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
790 {
791 struct be_adapter *adapter = netdev_priv(netdev);
792
793 adapter->vlans_added++;
794 if (!be_physfn(adapter))
795 return;
796
797 adapter->vlan_tag[vid] = 1;
798 if (adapter->vlans_added <= (adapter->max_vlans + 1))
799 be_vid_config(adapter, false, 0);
800 }
801
802 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
803 {
804 struct be_adapter *adapter = netdev_priv(netdev);
805
806 adapter->vlans_added--;
807 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
808
809 if (!be_physfn(adapter))
810 return;
811
812 adapter->vlan_tag[vid] = 0;
813 if (adapter->vlans_added <= adapter->max_vlans)
814 be_vid_config(adapter, false, 0);
815 }
816
817 static void be_set_multicast_list(struct net_device *netdev)
818 {
819 struct be_adapter *adapter = netdev_priv(netdev);
820
821 if (netdev->flags & IFF_PROMISC) {
822 be_cmd_promiscuous_config(adapter, true);
823 adapter->promiscuous = true;
824 goto done;
825 }
826
827 /* BE was previously in promiscuous mode; disable it */
828 if (adapter->promiscuous) {
829 adapter->promiscuous = false;
830 be_cmd_promiscuous_config(adapter, false);
831 }
832
833 /* Enable multicast promisc if num configured exceeds what we support */
834 if (netdev->flags & IFF_ALLMULTI ||
835 netdev_mc_count(netdev) > BE_MAX_MC) {
836 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
837 &adapter->mc_cmd_mem);
838 goto done;
839 }
840
841 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
842 &adapter->mc_cmd_mem);
843 done:
844 return;
845 }
846
847 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
848 {
849 struct be_adapter *adapter = netdev_priv(netdev);
850 int status;
851
852 if (!adapter->sriov_enabled)
853 return -EPERM;
854
855 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
856 return -EINVAL;
857
858 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
859 status = be_cmd_pmac_del(adapter,
860 adapter->vf_cfg[vf].vf_if_handle,
861 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
862
863 status = be_cmd_pmac_add(adapter, mac,
864 adapter->vf_cfg[vf].vf_if_handle,
865 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
866
867 if (status)
868 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
869 mac, vf);
870 else
871 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
872
873 return status;
874 }
875
876 static int be_get_vf_config(struct net_device *netdev, int vf,
877 struct ifla_vf_info *vi)
878 {
879 struct be_adapter *adapter = netdev_priv(netdev);
880
881 if (!adapter->sriov_enabled)
882 return -EPERM;
883
884 if (vf >= num_vfs)
885 return -EINVAL;
886
887 vi->vf = vf;
888 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
889 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
890 vi->qos = 0;
891 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
892
893 return 0;
894 }
895
896 static int be_set_vf_vlan(struct net_device *netdev,
897 int vf, u16 vlan, u8 qos)
898 {
899 struct be_adapter *adapter = netdev_priv(netdev);
900 int status = 0;
901
902 if (!adapter->sriov_enabled)
903 return -EPERM;
904
905 if ((vf >= num_vfs) || (vlan > 4095))
906 return -EINVAL;
907
908 if (vlan) {
909 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
910 adapter->vlans_added++;
911 } else {
912 adapter->vf_cfg[vf].vf_vlan_tag = 0;
913 adapter->vlans_added--;
914 }
915
916 status = be_vid_config(adapter, true, vf);
917
918 if (status)
919 dev_info(&adapter->pdev->dev,
920 "VLAN %d config on VF %d failed\n", vlan, vf);
921 return status;
922 }
923
924 static int be_set_vf_tx_rate(struct net_device *netdev,
925 int vf, int rate)
926 {
927 struct be_adapter *adapter = netdev_priv(netdev);
928 int status = 0;
929
930 if (!adapter->sriov_enabled)
931 return -EPERM;
932
933 if ((vf >= num_vfs) || (rate < 0))
934 return -EINVAL;
935
936 if (rate > 10000)
937 rate = 10000;
938
939 adapter->vf_cfg[vf].vf_tx_rate = rate;
940 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
941
942 if (status)
943 dev_info(&adapter->pdev->dev,
944 "tx rate %d on VF %d failed\n", rate, vf);
945 return status;
946 }
947
948 static void be_rx_rate_update(struct be_rx_obj *rxo)
949 {
950 struct be_rx_stats *stats = &rxo->stats;
951 ulong now = jiffies;
952
953 /* Wrapped around */
954 if (time_before(now, stats->rx_jiffies)) {
955 stats->rx_jiffies = now;
956 return;
957 }
958
959 /* Update the rate once in two seconds */
960 if ((now - stats->rx_jiffies) < 2 * HZ)
961 return;
962
963 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
964 now - stats->rx_jiffies);
965 stats->rx_jiffies = now;
966 stats->rx_bytes_prev = stats->rx_bytes;
967 }
968
969 static void be_rx_stats_update(struct be_rx_obj *rxo,
970 struct be_rx_compl_info *rxcp)
971 {
972 struct be_rx_stats *stats = &rxo->stats;
973
974 stats->rx_compl++;
975 stats->rx_frags += rxcp->num_rcvd;
976 stats->rx_bytes += rxcp->pkt_size;
977 stats->rx_pkts++;
978 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
979 stats->rx_mcast_pkts++;
980 if (rxcp->err)
981 stats->rxcp_err++;
982 }
983
984 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
985 {
986 /* L4 checksum is not reliable for non TCP/UDP packets.
987 * Also ignore ipcksm for ipv6 pkts */
988 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
989 (rxcp->ip_csum || rxcp->ipv6);
990 }
991
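/* Return the page_info for a posted rx frag and unmap the backing page
 * once the last frag sharing it has been consumed.
 */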
992 static struct be_rx_page_info *
993 get_rx_page_info(struct be_adapter *adapter,
994 struct be_rx_obj *rxo,
995 u16 frag_idx)
996 {
997 struct be_rx_page_info *rx_page_info;
998 struct be_queue_info *rxq = &rxo->q;
999
1000 rx_page_info = &rxo->page_info_tbl[frag_idx];
1001 BUG_ON(!rx_page_info->page);
1002
1003 if (rx_page_info->last_page_user) {
1004 dma_unmap_page(&adapter->pdev->dev,
1005 dma_unmap_addr(rx_page_info, bus),
1006 adapter->big_page_size, DMA_FROM_DEVICE);
1007 rx_page_info->last_page_user = false;
1008 }
1009
1010 atomic_dec(&rxq->used);
1011 return rx_page_info;
1012 }
1013
1014 /* Throw away the data in the Rx completion */
1015 static void be_rx_compl_discard(struct be_adapter *adapter,
1016 struct be_rx_obj *rxo,
1017 struct be_rx_compl_info *rxcp)
1018 {
1019 struct be_queue_info *rxq = &rxo->q;
1020 struct be_rx_page_info *page_info;
1021 u16 i, num_rcvd = rxcp->num_rcvd;
1022
1023 for (i = 0; i < num_rcvd; i++) {
1024 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1025 put_page(page_info->page);
1026 memset(page_info, 0, sizeof(*page_info));
1027 index_inc(&rxcp->rxq_idx, rxq->len);
1028 }
1029 }
1030
1031 /*
1032 * skb_fill_rx_data forms a complete skb for an ether frame
1033 * indicated by rxcp.
1034 */
1035 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1036 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1037 {
1038 struct be_queue_info *rxq = &rxo->q;
1039 struct be_rx_page_info *page_info;
1040 u16 i, j;
1041 u16 hdr_len, curr_frag_len, remaining;
1042 u8 *start;
1043
1044 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1045 start = page_address(page_info->page) + page_info->page_offset;
1046 prefetch(start);
1047
1048 /* Copy data in the first descriptor of this completion */
1049 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1050
1051 /* Copy the header portion into skb_data */
1052 hdr_len = min(BE_HDR_LEN, curr_frag_len);
1053 memcpy(skb->data, start, hdr_len);
1054 skb->len = curr_frag_len;
1055 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1056 /* Complete packet has now been moved to data */
1057 put_page(page_info->page);
1058 skb->data_len = 0;
1059 skb->tail += curr_frag_len;
1060 } else {
1061 skb_shinfo(skb)->nr_frags = 1;
1062 skb_shinfo(skb)->frags[0].page = page_info->page;
1063 skb_shinfo(skb)->frags[0].page_offset =
1064 page_info->page_offset + hdr_len;
1065 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
1066 skb->data_len = curr_frag_len - hdr_len;
1067 skb->tail += hdr_len;
1068 }
1069 page_info->page = NULL;
1070
1071 if (rxcp->pkt_size <= rx_frag_size) {
1072 BUG_ON(rxcp->num_rcvd != 1);
1073 return;
1074 }
1075
1076 /* More frags present for this completion */
1077 index_inc(&rxcp->rxq_idx, rxq->len);
1078 remaining = rxcp->pkt_size - curr_frag_len;
1079 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1080 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1081 curr_frag_len = min(remaining, rx_frag_size);
1082
1083 /* Coalesce all frags from the same physical page in one slot */
1084 if (page_info->page_offset == 0) {
1085 /* Fresh page */
1086 j++;
1087 skb_shinfo(skb)->frags[j].page = page_info->page;
1088 skb_shinfo(skb)->frags[j].page_offset =
1089 page_info->page_offset;
1090 skb_shinfo(skb)->frags[j].size = 0;
1091 skb_shinfo(skb)->nr_frags++;
1092 } else {
1093 put_page(page_info->page);
1094 }
1095
1096 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1097 skb->len += curr_frag_len;
1098 skb->data_len += curr_frag_len;
1099
1100 remaining -= curr_frag_len;
1101 index_inc(&rxcp->rxq_idx, rxq->len);
1102 page_info->page = NULL;
1103 }
1104 BUG_ON(j > MAX_SKB_FRAGS);
1105 }
1106
1107 /* Process the RX completion indicated by rxcp when GRO is disabled */
1108 static void be_rx_compl_process(struct be_adapter *adapter,
1109 struct be_rx_obj *rxo,
1110 struct be_rx_compl_info *rxcp)
1111 {
1112 struct net_device *netdev = adapter->netdev;
1113 struct sk_buff *skb;
1114
1115 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1116 if (unlikely(!skb)) {
1117 if (net_ratelimit())
1118 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1119 be_rx_compl_discard(adapter, rxo, rxcp);
1120 return;
1121 }
1122
1123 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1124
1125 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1126 skb->ip_summed = CHECKSUM_UNNECESSARY;
1127 else
1128 skb_checksum_none_assert(skb);
1129
1130 skb->truesize = skb->len + sizeof(struct sk_buff);
1131 skb->protocol = eth_type_trans(skb, netdev);
1132 if (adapter->netdev->features & NETIF_F_RXHASH)
1133 skb->rxhash = rxcp->rss_hash;
1134
1135
1136 if (unlikely(rxcp->vlanf)) {
1137 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1138 kfree_skb(skb);
1139 return;
1140 }
1141 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1142 rxcp->vlan_tag);
1143 } else {
1144 netif_receive_skb(skb);
1145 }
1146 }
1147
1148 /* Process the RX completion indicated by rxcp when GRO is enabled */
1149 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1150 struct be_rx_obj *rxo,
1151 struct be_rx_compl_info *rxcp)
1152 {
1153 struct be_rx_page_info *page_info;
1154 struct sk_buff *skb = NULL;
1155 struct be_queue_info *rxq = &rxo->q;
1156 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1157 u16 remaining, curr_frag_len;
1158 u16 i, j;
1159
1160 skb = napi_get_frags(&eq_obj->napi);
1161 if (!skb) {
1162 be_rx_compl_discard(adapter, rxo, rxcp);
1163 return;
1164 }
1165
1166 remaining = rxcp->pkt_size;
1167 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1168 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1169
1170 curr_frag_len = min(remaining, rx_frag_size);
1171
1172 /* Coalesce all frags from the same physical page in one slot */
1173 if (i == 0 || page_info->page_offset == 0) {
1174 /* First frag or Fresh page */
1175 j++;
1176 skb_shinfo(skb)->frags[j].page = page_info->page;
1177 skb_shinfo(skb)->frags[j].page_offset =
1178 page_info->page_offset;
1179 skb_shinfo(skb)->frags[j].size = 0;
1180 } else {
1181 put_page(page_info->page);
1182 }
1183 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1184
1185 remaining -= curr_frag_len;
1186 index_inc(&rxcp->rxq_idx, rxq->len);
1187 memset(page_info, 0, sizeof(*page_info));
1188 }
1189 BUG_ON(j > MAX_SKB_FRAGS);
1190
1191 skb_shinfo(skb)->nr_frags = j + 1;
1192 skb->len = rxcp->pkt_size;
1193 skb->data_len = rxcp->pkt_size;
1194 skb->truesize += rxcp->pkt_size;
1195 skb->ip_summed = CHECKSUM_UNNECESSARY;
1196 if (adapter->netdev->features & NETIF_F_RXHASH)
1197 skb->rxhash = rxcp->rss_hash;
1198
1199 if (likely(!rxcp->vlanf))
1200 napi_gro_frags(&eq_obj->napi);
1201 else
1202 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
1203 rxcp->vlan_tag);
1204 }
1205
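/* Decode a v1 (BE3 native mode) rx completion into the generic rxcp struct */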
1206 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1207 struct be_eth_rx_compl *compl,
1208 struct be_rx_compl_info *rxcp)
1209 {
1210 rxcp->pkt_size =
1211 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1212 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1213 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1214 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1215 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1216 rxcp->ip_csum =
1217 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1218 rxcp->l4_csum =
1219 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1220 rxcp->ipv6 =
1221 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1222 rxcp->rxq_idx =
1223 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1224 rxcp->num_rcvd =
1225 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1226 rxcp->pkt_type =
1227 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1228 rxcp->rss_hash =
1229 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1230 if (rxcp->vlanf) {
1231 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1232 compl);
1233 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1234 compl);
1235 }
1236 }
1237
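/* Decode a v0 (legacy) rx completion into the generic rxcp struct */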
1238 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1239 struct be_eth_rx_compl *compl,
1240 struct be_rx_compl_info *rxcp)
1241 {
1242 rxcp->pkt_size =
1243 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1244 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1245 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1246 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1247 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1248 rxcp->ip_csum =
1249 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1250 rxcp->l4_csum =
1251 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1252 rxcp->ipv6 =
1253 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1254 rxcp->rxq_idx =
1255 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1256 rxcp->num_rcvd =
1257 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1258 rxcp->pkt_type =
1259 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1260 rxcp->rss_hash =
1261 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1262 if (rxcp->vlanf) {
1263 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1264 compl);
1265 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1266 compl);
1267 }
1268 }
1269
1270 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1271 {
1272 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1273 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1274 struct be_adapter *adapter = rxo->adapter;
1275
1276 /* For checking the valid bit it is Ok to use either definition as the
1277 * valid bit is at the same position in both v0 and v1 Rx compl */
1278 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1279 return NULL;
1280
1281 rmb();
1282 be_dws_le_to_cpu(compl, sizeof(*compl));
1283
1284 if (adapter->be3_native)
1285 be_parse_rx_compl_v1(adapter, compl, rxcp);
1286 else
1287 be_parse_rx_compl_v0(adapter, compl, rxcp);
1288
1289 if (rxcp->vlanf) {
1290 /* vlanf could be wrongly set in some cards.
1291 * ignore if vtm is not set */
1292 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1293 rxcp->vlanf = 0;
1294
1295 if (!lancer_chip(adapter))
1296 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1297
1298 if (((adapter->pvid & VLAN_VID_MASK) ==
1299 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1300 !adapter->vlan_tag[rxcp->vlan_tag])
1301 rxcp->vlanf = 0;
1302 }
1303
1304 /* As the compl has been parsed, reset it; we won't touch it again */
1305 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1306
1307 queue_tail_inc(&rxo->cq);
1308 return rxcp;
1309 }
1310
1311 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1312 {
1313 u32 order = get_order(size);
1314
1315 if (order > 0)
1316 gfp |= __GFP_COMP;
1317 return alloc_pages(gfp, order);
1318 }
1319
1320 /*
1321 * Allocate a page, split it to fragments of size rx_frag_size and post as
1322 * receive buffers to BE
1323 */
1324 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1325 {
1326 struct be_adapter *adapter = rxo->adapter;
1327 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1328 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1329 struct be_queue_info *rxq = &rxo->q;
1330 struct page *pagep = NULL;
1331 struct be_eth_rx_d *rxd;
1332 u64 page_dmaaddr = 0, frag_dmaaddr;
1333 u32 posted, page_offset = 0;
1334
1335 page_info = &rxo->page_info_tbl[rxq->head];
1336 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1337 if (!pagep) {
1338 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1339 if (unlikely(!pagep)) {
1340 rxo->stats.rx_post_fail++;
1341 break;
1342 }
1343 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1344 0, adapter->big_page_size,
1345 DMA_FROM_DEVICE);
1346 page_info->page_offset = 0;
1347 } else {
1348 get_page(pagep);
1349 page_info->page_offset = page_offset + rx_frag_size;
1350 }
1351 page_offset = page_info->page_offset;
1352 page_info->page = pagep;
1353 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1354 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1355
1356 rxd = queue_head_node(rxq);
1357 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1358 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1359
1360 /* Any space left in the current big page for another frag? */
1361 if ((page_offset + rx_frag_size + rx_frag_size) >
1362 adapter->big_page_size) {
1363 pagep = NULL;
1364 page_info->last_page_user = true;
1365 }
1366
1367 prev_page_info = page_info;
1368 queue_head_inc(rxq);
1369 page_info = &page_info_tbl[rxq->head];
1370 }
1371 if (pagep)
1372 prev_page_info->last_page_user = true;
1373
1374 if (posted) {
1375 atomic_add(posted, &rxq->used);
1376 be_rxq_notify(adapter, rxq->id, posted);
1377 } else if (atomic_read(&rxq->used) == 0) {
1378 /* Let be_worker replenish when memory is available */
1379 rxo->rx_post_starved = true;
1380 }
1381 }
1382
1383 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1384 {
1385 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1386
1387 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1388 return NULL;
1389
1390 rmb();
1391 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1392
1393 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1394
1395 queue_tail_inc(tx_cq);
1396 return txcp;
1397 }
1398
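/* Unmap and free the skb for the tx completion ending at last_index;
 * returns the number of wrbs reclaimed, including the header wrb.
 */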
1399 static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1400 {
1401 struct be_queue_info *txq = &adapter->tx_obj.q;
1402 struct be_eth_wrb *wrb;
1403 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1404 struct sk_buff *sent_skb;
1405 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1406 bool unmap_skb_hdr = true;
1407
1408 sent_skb = sent_skbs[txq->tail];
1409 BUG_ON(!sent_skb);
1410 sent_skbs[txq->tail] = NULL;
1411
1412 /* skip header wrb */
1413 queue_tail_inc(txq);
1414
1415 do {
1416 cur_index = txq->tail;
1417 wrb = queue_tail_node(txq);
1418 unmap_tx_frag(&adapter->pdev->dev, wrb,
1419 (unmap_skb_hdr && skb_headlen(sent_skb)));
1420 unmap_skb_hdr = false;
1421
1422 num_wrbs++;
1423 queue_tail_inc(txq);
1424 } while (cur_index != last_index);
1425
1426 kfree_skb(sent_skb);
1427 return num_wrbs;
1428 }
1429
1430 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1431 {
1432 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1433
1434 if (!eqe->evt)
1435 return NULL;
1436
1437 rmb();
1438 eqe->evt = le32_to_cpu(eqe->evt);
1439 queue_tail_inc(&eq_obj->q);
1440 return eqe;
1441 }
1442
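/* Drain the event queue, re-arm it, and kick off NAPI processing if any
 * events were found.
 */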
1443 static int event_handle(struct be_adapter *adapter,
1444 struct be_eq_obj *eq_obj)
1445 {
1446 struct be_eq_entry *eqe;
1447 u16 num = 0;
1448
1449 while ((eqe = event_get(eq_obj)) != NULL) {
1450 eqe->evt = 0;
1451 num++;
1452 }
1453
1454 /* Deal with any spurious interrupts that come
1455 * without events
1456 */
1457 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1458 if (num)
1459 napi_schedule(&eq_obj->napi);
1460
1461 return num;
1462 }
1463
1464 /* Just read and notify events without processing them.
1465 * Used at the time of destroying event queues */
1466 static void be_eq_clean(struct be_adapter *adapter,
1467 struct be_eq_obj *eq_obj)
1468 {
1469 struct be_eq_entry *eqe;
1470 u16 num = 0;
1471
1472 while ((eqe = event_get(eq_obj)) != NULL) {
1473 eqe->evt = 0;
1474 num++;
1475 }
1476
1477 if (num)
1478 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1479 }
1480
1481 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1482 {
1483 struct be_rx_page_info *page_info;
1484 struct be_queue_info *rxq = &rxo->q;
1485 struct be_queue_info *rx_cq = &rxo->cq;
1486 struct be_rx_compl_info *rxcp;
1487 u16 tail;
1488
1489 /* First cleanup pending rx completions */
1490 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1491 be_rx_compl_discard(adapter, rxo, rxcp);
1492 be_cq_notify(adapter, rx_cq->id, false, 1);
1493 }
1494
1495 /* Then free posted rx buffers that were not used */
1496 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1497 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1498 page_info = get_rx_page_info(adapter, rxo, tail);
1499 put_page(page_info->page);
1500 memset(page_info, 0, sizeof(*page_info));
1501 }
1502 BUG_ON(atomic_read(&rxq->used));
1503 }
1504
1505 static void be_tx_compl_clean(struct be_adapter *adapter)
1506 {
1507 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1508 struct be_queue_info *txq = &adapter->tx_obj.q;
1509 struct be_eth_tx_compl *txcp;
1510 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1511 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1512 struct sk_buff *sent_skb;
1513 bool dummy_wrb;
1514
1515 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1516 do {
1517 while ((txcp = be_tx_compl_get(tx_cq))) {
1518 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1519 wrb_index, txcp);
1520 num_wrbs += be_tx_compl_process(adapter, end_idx);
1521 cmpl++;
1522 }
1523 if (cmpl) {
1524 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1525 atomic_sub(num_wrbs, &txq->used);
1526 cmpl = 0;
1527 num_wrbs = 0;
1528 }
1529
1530 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1531 break;
1532
1533 mdelay(1);
1534 } while (true);
1535
1536 if (atomic_read(&txq->used))
1537 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1538 atomic_read(&txq->used));
1539
1540 /* free posted tx for which compls will never arrive */
1541 while (atomic_read(&txq->used)) {
1542 sent_skb = sent_skbs[txq->tail];
1543 end_idx = txq->tail;
1544 index_adv(&end_idx,
1545 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1546 txq->len);
1547 num_wrbs = be_tx_compl_process(adapter, end_idx);
1548 atomic_sub(num_wrbs, &txq->used);
1549 }
1550 }
1551
1552 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1553 {
1554 struct be_queue_info *q;
1555
1556 q = &adapter->mcc_obj.q;
1557 if (q->created)
1558 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1559 be_queue_free(adapter, q);
1560
1561 q = &adapter->mcc_obj.cq;
1562 if (q->created)
1563 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1564 be_queue_free(adapter, q);
1565 }
1566
1567 /* Must be called only after TX qs are created as MCC shares TX EQ */
1568 static int be_mcc_queues_create(struct be_adapter *adapter)
1569 {
1570 struct be_queue_info *q, *cq;
1571
1572 /* Alloc MCC compl queue */
1573 cq = &adapter->mcc_obj.cq;
1574 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1575 sizeof(struct be_mcc_compl)))
1576 goto err;
1577
1578 /* Ask BE to create MCC compl queue; share TX's eq */
1579 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1580 goto mcc_cq_free;
1581
1582 /* Alloc MCC queue */
1583 q = &adapter->mcc_obj.q;
1584 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1585 goto mcc_cq_destroy;
1586
1587 /* Ask BE to create MCC queue */
1588 if (be_cmd_mccq_create(adapter, q, cq))
1589 goto mcc_q_free;
1590
1591 return 0;
1592
1593 mcc_q_free:
1594 be_queue_free(adapter, q);
1595 mcc_cq_destroy:
1596 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1597 mcc_cq_free:
1598 be_queue_free(adapter, cq);
1599 err:
1600 return -1;
1601 }
1602
1603 static void be_tx_queues_destroy(struct be_adapter *adapter)
1604 {
1605 struct be_queue_info *q;
1606
1607 q = &adapter->tx_obj.q;
1608 if (q->created)
1609 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1610 be_queue_free(adapter, q);
1611
1612 q = &adapter->tx_obj.cq;
1613 if (q->created)
1614 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1615 be_queue_free(adapter, q);
1616
1617 /* Clear any residual events */
1618 be_eq_clean(adapter, &adapter->tx_eq);
1619
1620 q = &adapter->tx_eq.q;
1621 if (q->created)
1622 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1623 be_queue_free(adapter, q);
1624 }
1625
1626 static int be_tx_queues_create(struct be_adapter *adapter)
1627 {
1628 struct be_queue_info *eq, *q, *cq;
1629
1630 adapter->tx_eq.max_eqd = 0;
1631 adapter->tx_eq.min_eqd = 0;
1632 adapter->tx_eq.cur_eqd = 96;
1633 adapter->tx_eq.enable_aic = false;
1634 /* Alloc Tx Event queue */
1635 eq = &adapter->tx_eq.q;
1636 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1637 return -1;
1638
1639 /* Ask BE to create Tx Event queue */
1640 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1641 goto tx_eq_free;
1642
1643 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1644
1645
1646 /* Alloc TX eth compl queue */
1647 cq = &adapter->tx_obj.cq;
1648 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1649 sizeof(struct be_eth_tx_compl)))
1650 goto tx_eq_destroy;
1651
1652 /* Ask BE to create Tx eth compl queue */
1653 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1654 goto tx_cq_free;
1655
1656 /* Alloc TX eth queue */
1657 q = &adapter->tx_obj.q;
1658 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1659 goto tx_cq_destroy;
1660
1661 /* Ask BE to create Tx eth queue */
1662 if (be_cmd_txq_create(adapter, q, cq))
1663 goto tx_q_free;
1664 return 0;
1665
1666 tx_q_free:
1667 be_queue_free(adapter, q);
1668 tx_cq_destroy:
1669 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1670 tx_cq_free:
1671 be_queue_free(adapter, cq);
1672 tx_eq_destroy:
1673 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1674 tx_eq_free:
1675 be_queue_free(adapter, eq);
1676 return -1;
1677 }
1678
1679 static void be_rx_queues_destroy(struct be_adapter *adapter)
1680 {
1681 struct be_queue_info *q;
1682 struct be_rx_obj *rxo;
1683 int i;
1684
1685 for_all_rx_queues(adapter, rxo, i) {
1686 q = &rxo->q;
1687 if (q->created) {
1688 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1689 /* After the rxq is invalidated, wait for a grace time
1690 * of 1ms for all dma to end and the flush compl to
1691 * arrive
1692 */
1693 mdelay(1);
1694 be_rx_q_clean(adapter, rxo);
1695 }
1696 be_queue_free(adapter, q);
1697
1698 q = &rxo->cq;
1699 if (q->created)
1700 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1701 be_queue_free(adapter, q);
1702
1703 /* Clear any residual events */
1704 q = &rxo->rx_eq.q;
1705 if (q->created) {
1706 be_eq_clean(adapter, &rxo->rx_eq);
1707 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1708 }
1709 be_queue_free(adapter, q);
1710 }
1711 }
1712
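/* Request multiple (RSS) rx queues only when the module param allows it,
 * the function reports RSS capability, SR-IOV is off and the 0x400
 * function-mode bit is clear.
 */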
1713 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1714 {
1715 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1716 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1717 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1718 } else {
1719 dev_warn(&adapter->pdev->dev,
1720 "No support for multiple RX queues\n");
1721 return 1;
1722 }
1723 }
1724
1725 static int be_rx_queues_create(struct be_adapter *adapter)
1726 {
1727 struct be_queue_info *eq, *q, *cq;
1728 struct be_rx_obj *rxo;
1729 int rc, i;
1730
1731 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1732 msix_enabled(adapter) ?
1733 adapter->num_msix_vec - 1 : 1);
1734 if (adapter->num_rx_qs != MAX_RX_QS)
1735 dev_warn(&adapter->pdev->dev,
1736 "Can create only %d RX queues", adapter->num_rx_qs);
1737
1738 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1739 for_all_rx_queues(adapter, rxo, i) {
1740 rxo->adapter = adapter;
1741 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1742 rxo->rx_eq.enable_aic = true;
1743
1744 /* EQ */
1745 eq = &rxo->rx_eq.q;
1746 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1747 sizeof(struct be_eq_entry));
1748 if (rc)
1749 goto err;
1750
1751 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1752 if (rc)
1753 goto err;
1754
1755 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1756
1757 /* CQ */
1758 cq = &rxo->cq;
1759 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1760 sizeof(struct be_eth_rx_compl));
1761 if (rc)
1762 goto err;
1763
1764 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1765 if (rc)
1766 goto err;
1767 /* Rx Q */
1768 q = &rxo->q;
1769 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1770 sizeof(struct be_eth_rx_d));
1771 if (rc)
1772 goto err;
1773
1774 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1775 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1776 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1777 if (rc)
1778 goto err;
1779 }
1780
1781 if (be_multi_rxq(adapter)) {
1782 u8 rsstable[MAX_RSS_QS];
1783
1784 for_all_rss_queues(adapter, rxo, i)
1785 rsstable[i] = rxo->rss_id;
1786
1787 rc = be_cmd_rss_config(adapter, rsstable,
1788 adapter->num_rx_qs - 1);
1789 if (rc)
1790 goto err;
1791 }
1792
1793 return 0;
1794 err:
1795 be_rx_queues_destroy(adapter);
1796 return -1;
1797 }
1798
1799 static bool event_peek(struct be_eq_obj *eq_obj)
1800 {
1801 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1802 if (!eqe->evt)
1803 return false;
1804 else
1805 return true;
1806 }
1807
1808 static irqreturn_t be_intx(int irq, void *dev)
1809 {
1810 struct be_adapter *adapter = dev;
1811 struct be_rx_obj *rxo;
1812 int isr, i, tx = 0 , rx = 0;
1813
1814 if (lancer_chip(adapter)) {
1815 if (event_peek(&adapter->tx_eq))
1816 tx = event_handle(adapter, &adapter->tx_eq);
1817 for_all_rx_queues(adapter, rxo, i) {
1818 if (event_peek(&rxo->rx_eq))
1819 rx |= event_handle(adapter, &rxo->rx_eq);
1820 }
1821
1822 if (!(tx || rx))
1823 return IRQ_NONE;
1824
1825 } else {
1826 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1827 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1828 if (!isr)
1829 return IRQ_NONE;
1830
1831 if ((1 << adapter->tx_eq.eq_idx & isr))
1832 event_handle(adapter, &adapter->tx_eq);
1833
1834 for_all_rx_queues(adapter, rxo, i) {
1835 if ((1 << rxo->rx_eq.eq_idx & isr))
1836 event_handle(adapter, &rxo->rx_eq);
1837 }
1838 }
1839
1840 return IRQ_HANDLED;
1841 }
1842
1843 static irqreturn_t be_msix_rx(int irq, void *dev)
1844 {
1845 struct be_rx_obj *rxo = dev;
1846 struct be_adapter *adapter = rxo->adapter;
1847
1848 event_handle(adapter, &rxo->rx_eq);
1849
1850 return IRQ_HANDLED;
1851 }
1852
1853 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1854 {
1855 struct be_adapter *adapter = dev;
1856
1857 event_handle(adapter, &adapter->tx_eq);
1858
1859 return IRQ_HANDLED;
1860 }
1861
1862 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1863 {
1864 return (rxcp->tcpf && !rxcp->err) ? true : false;
1865 }
1866
1867 static int be_poll_rx(struct napi_struct *napi, int budget)
1868 {
1869 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1870 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1871 struct be_adapter *adapter = rxo->adapter;
1872 struct be_queue_info *rx_cq = &rxo->cq;
1873 struct be_rx_compl_info *rxcp;
1874 u32 work_done;
1875
1876 rxo->stats.rx_polls++;
1877 for (work_done = 0; work_done < budget; work_done++) {
1878 rxcp = be_rx_compl_get(rxo);
1879 if (!rxcp)
1880 break;
1881
1882 /* Ignore flush completions */
1883 if (rxcp->num_rcvd && rxcp->pkt_size) {
1884 if (do_gro(rxcp))
1885 be_rx_compl_process_gro(adapter, rxo, rxcp);
1886 else
1887 be_rx_compl_process(adapter, rxo, rxcp);
1888 } else if (rxcp->pkt_size == 0) {
1889 be_rx_compl_discard(adapter, rxo, rxcp);
1890 }
1891
1892 be_rx_stats_update(rxo, rxcp);
1893 }
1894
1895 /* Refill the queue */
1896 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1897 be_post_rx_frags(rxo, GFP_ATOMIC);
1898
1899 /* All consumed */
1900 if (work_done < budget) {
1901 napi_complete(napi);
1902 be_cq_notify(adapter, rx_cq->id, true, work_done);
1903 } else {
1904 /* More to be consumed; continue with interrupts disabled */
1905 be_cq_notify(adapter, rx_cq->id, false, work_done);
1906 }
1907 return work_done;
1908 }
1909
1910 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1911 * For TX/MCC we don't honour budget; consume everything.
1912 */
1913 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1914 {
1915 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1916 struct be_adapter *adapter =
1917 container_of(tx_eq, struct be_adapter, tx_eq);
1918 struct be_queue_info *txq = &adapter->tx_obj.q;
1919 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1920 struct be_eth_tx_compl *txcp;
1921 int tx_compl = 0, mcc_compl, status = 0;
1922 u16 end_idx, num_wrbs = 0;
1923
1924 while ((txcp = be_tx_compl_get(tx_cq))) {
1925 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1926 wrb_index, txcp);
1927 num_wrbs += be_tx_compl_process(adapter, end_idx);
1928 tx_compl++;
1929 }
1930
1931 mcc_compl = be_process_mcc(adapter, &status);
1932
1933 napi_complete(napi);
1934
1935 if (mcc_compl) {
1936 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1937 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1938 }
1939
1940 if (tx_compl) {
1941 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1942
1943 atomic_sub(num_wrbs, &txq->used);
1944
1945 /* As Tx wrbs have been freed up, wake up netdev queue if
1946 * it was stopped due to lack of tx wrbs.
1947 */
1948 if (netif_queue_stopped(adapter->netdev) &&
1949 atomic_read(&txq->used) < txq->len / 2) {
1950 netif_wake_queue(adapter->netdev);
1951 }
1952
1953 tx_stats(adapter)->be_tx_events++;
1954 tx_stats(adapter)->be_tx_compl += tx_compl;
1955 }
1956
1957 return 1;
1958 }
1959
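/* Read the UE (unrecoverable error) status registers from PCI config space
 * and log every unmasked error bit that is set.
 */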
1960 void be_detect_dump_ue(struct be_adapter *adapter)
1961 {
1962 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1963 u32 i;
1964
1965 pci_read_config_dword(adapter->pdev,
1966 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1967 pci_read_config_dword(adapter->pdev,
1968 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1969 pci_read_config_dword(adapter->pdev,
1970 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1971 pci_read_config_dword(adapter->pdev,
1972 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1973
1974 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1975 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1976
1977 if (ue_status_lo || ue_status_hi) {
1978 adapter->ue_detected = true;
1979 adapter->eeh_err = true;
1980 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1981 }
1982
1983 if (ue_status_lo) {
1984 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1985 if (ue_status_lo & 1)
1986 dev_err(&adapter->pdev->dev,
1987 "UE: %s bit set\n", ue_status_low_desc[i]);
1988 }
1989 }
1990 if (ue_status_hi) {
1991 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1992 if (ue_status_hi & 1)
1993 dev_err(&adapter->pdev->dev,
1994 "UE: %s bit set\n", ue_status_hi_desc[i]);
1995 }
1996 }
1997
1998 }
1999
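/* Periodic (1 second) housekeeping: UE detection, stats refresh, TX/RX rate
 * and EQ-delay updates, and replenishing of starved RX rings. While the
 * interface is down, only pending MCC completions are reaped.
 */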
2000 static void be_worker(struct work_struct *work)
2001 {
2002 struct be_adapter *adapter =
2003 container_of(work, struct be_adapter, work.work);
2004 struct be_rx_obj *rxo;
2005 int i;
2006
2007 if (!adapter->ue_detected && !lancer_chip(adapter))
2008 be_detect_dump_ue(adapter);
2009
2010 /* when interrupts are not yet enabled, just reap any pending
2011 * mcc completions */
2012 if (!netif_running(adapter->netdev)) {
2013 int mcc_compl, status = 0;
2014
2015 mcc_compl = be_process_mcc(adapter, &status);
2016
2017 if (mcc_compl) {
2018 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2019 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2020 }
2021
2022 goto reschedule;
2023 }
2024
2025 if (!adapter->stats_cmd_sent)
2026 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2027
2028 be_tx_rate_update(adapter);
2029
2030 for_all_rx_queues(adapter, rxo, i) {
2031 be_rx_rate_update(rxo);
2032 be_rx_eqd_update(adapter, rxo);
2033
2034 if (rxo->rx_post_starved) {
2035 rxo->rx_post_starved = false;
2036 be_post_rx_frags(rxo, GFP_KERNEL);
2037 }
2038 }
2039
2040 reschedule:
2041 adapter->work_counter++;
2042 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2043 }
2044
2045 static void be_msix_disable(struct be_adapter *adapter)
2046 {
2047 if (msix_enabled(adapter)) {
2048 pci_disable_msix(adapter->pdev);
2049 adapter->num_msix_vec = 0;
2050 }
2051 }
2052
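/* Request one MSI-X vector per desired RX queue plus one for TX/MCC. If
 * fewer vectors are available, retry with the number reported by the OS,
 * as long as it meets the TX + RX minimum.
 */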
2053 static void be_msix_enable(struct be_adapter *adapter)
2054 {
2055 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
2056 int i, status, num_vec;
2057
2058 num_vec = be_num_rxqs_want(adapter) + 1;
2059
2060 for (i = 0; i < num_vec; i++)
2061 adapter->msix_entries[i].entry = i;
2062
2063 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2064 if (status == 0) {
2065 goto done;
2066 } else if (status >= BE_MIN_MSIX_VECTORS) {
2067 num_vec = status;
2068 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2069 num_vec) == 0)
2070 goto done;
2071 }
2072 return;
2073 done:
2074 adapter->num_msix_vec = num_vec;
2075 return;
2076 }
2077
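/* On the PF, cap the requested num_vfs to the TotalVFs value advertised by
 * the SR-IOV capability and enable that many virtual functions.
 */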
2078 static void be_sriov_enable(struct be_adapter *adapter)
2079 {
2080 be_check_sriov_fn_type(adapter);
2081 #ifdef CONFIG_PCI_IOV
2082 if (be_physfn(adapter) && num_vfs) {
2083 int status, pos;
2084 u16 nvfs;
2085
2086 pos = pci_find_ext_capability(adapter->pdev,
2087 PCI_EXT_CAP_ID_SRIOV);
2088 pci_read_config_word(adapter->pdev,
2089 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2090
2091 if (num_vfs > nvfs) {
2092 dev_info(&adapter->pdev->dev,
2093 "Device supports %d VFs and not %d\n",
2094 nvfs, num_vfs);
2095 num_vfs = nvfs;
2096 }
2097
2098 status = pci_enable_sriov(adapter->pdev, num_vfs);
2099 adapter->sriov_enabled = status ? false : true;
2100 }
2101 #endif
2102 }
2103
2104 static void be_sriov_disable(struct be_adapter *adapter)
2105 {
2106 #ifdef CONFIG_PCI_IOV
2107 if (adapter->sriov_enabled) {
2108 pci_disable_sriov(adapter->pdev);
2109 adapter->sriov_enabled = false;
2110 }
2111 #endif
2112 }
2113
2114 static inline int be_msix_vec_get(struct be_adapter *adapter,
2115 struct be_eq_obj *eq_obj)
2116 {
2117 return adapter->msix_entries[eq_obj->eq_idx].vector;
2118 }
2119
2120 static int be_request_irq(struct be_adapter *adapter,
2121 struct be_eq_obj *eq_obj,
2122 void *handler, char *desc, void *context)
2123 {
2124 struct net_device *netdev = adapter->netdev;
2125 int vec;
2126
2127 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2128 vec = be_msix_vec_get(adapter, eq_obj);
2129 return request_irq(vec, handler, 0, eq_obj->desc, context);
2130 }
2131
2132 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2133 void *context)
2134 {
2135 int vec = be_msix_vec_get(adapter, eq_obj);
2136 free_irq(vec, context);
2137 }
2138
2139 static int be_msix_register(struct be_adapter *adapter)
2140 {
2141 struct be_rx_obj *rxo;
2142 int status, i;
2143 char qname[10];
2144
2145 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2146 adapter);
2147 if (status)
2148 goto err;
2149
2150 for_all_rx_queues(adapter, rxo, i) {
2151 sprintf(qname, "rxq%d", i);
2152 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2153 qname, rxo);
2154 if (status)
2155 goto err_msix;
2156 }
2157
2158 return 0;
2159
2160 err_msix:
2161 be_free_irq(adapter, &adapter->tx_eq, adapter);
2162
2163 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2164 be_free_irq(adapter, &rxo->rx_eq, rxo);
2165
2166 err:
2167 dev_warn(&adapter->pdev->dev,
2168 "MSIX Request IRQ failed - err %d\n", status);
2169 be_msix_disable(adapter);
2170 return status;
2171 }
2172
2173 static int be_irq_register(struct be_adapter *adapter)
2174 {
2175 struct net_device *netdev = adapter->netdev;
2176 int status;
2177
2178 if (msix_enabled(adapter)) {
2179 status = be_msix_register(adapter);
2180 if (status == 0)
2181 goto done;
2182 /* INTx is not supported for VF */
2183 if (!be_physfn(adapter))
2184 return status;
2185 }
2186
2187 /* INTx */
2188 netdev->irq = adapter->pdev->irq;
2189 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2190 adapter);
2191 if (status) {
2192 dev_err(&adapter->pdev->dev,
2193 "INTx request IRQ failed - err %d\n", status);
2194 return status;
2195 }
2196 done:
2197 adapter->isr_registered = true;
2198 return 0;
2199 }
2200
2201 static void be_irq_unregister(struct be_adapter *adapter)
2202 {
2203 struct net_device *netdev = adapter->netdev;
2204 struct be_rx_obj *rxo;
2205 int i;
2206
2207 if (!adapter->isr_registered)
2208 return;
2209
2210 /* INTx */
2211 if (!msix_enabled(adapter)) {
2212 free_irq(netdev->irq, adapter);
2213 goto done;
2214 }
2215
2216 /* MSIx */
2217 be_free_irq(adapter, &adapter->tx_eq, adapter);
2218
2219 for_all_rx_queues(adapter, rxo, i)
2220 be_free_irq(adapter, &rxo->rx_eq, rxo);
2221
2222 done:
2223 adapter->isr_registered = false;
2224 }
2225
2226 static int be_close(struct net_device *netdev)
2227 {
2228 struct be_adapter *adapter = netdev_priv(netdev);
2229 struct be_rx_obj *rxo;
2230 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2231 int vec, i;
2232
2233 be_async_mcc_disable(adapter);
2234
2235 netif_carrier_off(netdev);
2236 adapter->link_up = false;
2237
2238 if (!lancer_chip(adapter))
2239 be_intr_set(adapter, false);
2240
2241 for_all_rx_queues(adapter, rxo, i)
2242 napi_disable(&rxo->rx_eq.napi);
2243
2244 napi_disable(&tx_eq->napi);
2245
2246 if (lancer_chip(adapter)) {
2247 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2248 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2249 for_all_rx_queues(adapter, rxo, i)
2250 be_cq_notify(adapter, rxo->cq.id, false, 0);
2251 }
2252
2253 if (msix_enabled(adapter)) {
2254 vec = be_msix_vec_get(adapter, tx_eq);
2255 synchronize_irq(vec);
2256
2257 for_all_rx_queues(adapter, rxo, i) {
2258 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2259 synchronize_irq(vec);
2260 }
2261 } else {
2262 synchronize_irq(netdev->irq);
2263 }
2264 be_irq_unregister(adapter);
2265
2266 /* Wait for all pending tx completions to arrive so that
2267 * all tx skbs are freed.
2268 */
2269 be_tx_compl_clean(adapter);
2270
2271 return 0;
2272 }
2273
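/* ndo_open: post RX buffers, enable NAPI and interrupts, arm the event and
 * completion queues, then query the link state and (on the PF) program the
 * VLAN and flow-control settings.
 */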
2274 static int be_open(struct net_device *netdev)
2275 {
2276 struct be_adapter *adapter = netdev_priv(netdev);
2277 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2278 struct be_rx_obj *rxo;
2279 bool link_up;
2280 int status, i;
2281 u8 mac_speed;
2282 u16 link_speed;
2283
2284 for_all_rx_queues(adapter, rxo, i) {
2285 be_post_rx_frags(rxo, GFP_KERNEL);
2286 napi_enable(&rxo->rx_eq.napi);
2287 }
2288 napi_enable(&tx_eq->napi);
2289
2290 be_irq_register(adapter);
2291
2292 if (!lancer_chip(adapter))
2293 be_intr_set(adapter, true);
2294
2295 /* The evt queues are created in unarmed state; arm them */
2296 for_all_rx_queues(adapter, rxo, i) {
2297 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2298 be_cq_notify(adapter, rxo->cq.id, true, 0);
2299 }
2300 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2301
2302 /* Now that interrupts are on we can process async mcc */
2303 be_async_mcc_enable(adapter);
2304
2305 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2306 &link_speed, 0);
2307 if (status)
2308 goto err;
2309 be_link_status_update(adapter, link_up);
2310
2311 if (be_physfn(adapter)) {
2312 status = be_vid_config(adapter, false, 0);
2313 if (status)
2314 goto err;
2315
2316 status = be_cmd_set_flow_control(adapter,
2317 adapter->tx_fc, adapter->rx_fc);
2318 if (status)
2319 goto err;
2320 }
2321
2322 return 0;
2323 err:
2324 be_close(adapter->netdev);
2325 return -EIO;
2326 }
2327
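/* Program (or clear) the magic-packet Wake-on-LAN filter in firmware and set
 * the matching PCI wake capability for the D3hot/D3cold states.
 */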
2328 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2329 {
2330 struct be_dma_mem cmd;
2331 int status = 0;
2332 u8 mac[ETH_ALEN];
2333
2334 memset(mac, 0, ETH_ALEN);
2335
2336 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2337 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2338 GFP_KERNEL);
2339 if (cmd.va == NULL)
2340 return -1;
2341 memset(cmd.va, 0, cmd.size);
2342
2343 if (enable) {
2344 status = pci_write_config_dword(adapter->pdev,
2345 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2346 if (status) {
2347 dev_err(&adapter->pdev->dev,
2348 "Could not enable Wake-on-lan\n");
2349 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2350 cmd.dma);
2351 return status;
2352 }
2353 status = be_cmd_enable_magic_wol(adapter,
2354 adapter->netdev->dev_addr, &cmd);
2355 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2356 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2357 } else {
2358 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2359 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2360 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2361 }
2362
2363 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2364 return status;
2365 }
2366
2367 /*
2368 * Generate a seed MAC address from the PF MAC address using jhash.
2369 * MAC addresses for the VFs are assigned incrementally, starting from the seed.
2370 * These addresses are programmed into the ASIC by the PF; each VF driver
2371 * queries for its MAC address during probe.
2372 */
2373 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2374 {
2375 u32 vf = 0;
2376 int status = 0;
2377 u8 mac[ETH_ALEN];
2378
2379 be_vf_eth_addr_generate(adapter, mac);
2380
2381 for (vf = 0; vf < num_vfs; vf++) {
2382 status = be_cmd_pmac_add(adapter, mac,
2383 adapter->vf_cfg[vf].vf_if_handle,
2384 &adapter->vf_cfg[vf].vf_pmac_id,
2385 vf + 1);
2386 if (status)
2387 dev_err(&adapter->pdev->dev,
2388 "Mac address add failed for VF %d\n", vf);
2389 else
2390 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2391
2392 mac[5] += 1;
2393 }
2394 return status;
2395 }
2396
2397 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2398 {
2399 u32 vf;
2400
2401 for (vf = 0; vf < num_vfs; vf++) {
2402 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2403 be_cmd_pmac_del(adapter,
2404 adapter->vf_cfg[vf].vf_if_handle,
2405 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2406 }
2407 }
2408
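/* One-time adapter setup: create the interface (plus one per VF on the PF)
 * and then the TX, RX and MCC queues; on failure, tear down whatever has
 * been created so far.
 */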
2409 static int be_setup(struct be_adapter *adapter)
2410 {
2411 struct net_device *netdev = adapter->netdev;
2412 u32 cap_flags, en_flags, vf = 0;
2413 int status;
2414 u8 mac[ETH_ALEN];
2415
2416 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2417 BE_IF_FLAGS_BROADCAST |
2418 BE_IF_FLAGS_MULTICAST;
2419
2420 if (be_physfn(adapter)) {
2421 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2422 BE_IF_FLAGS_PROMISCUOUS |
2423 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2424 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2425
2426 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2427 cap_flags |= BE_IF_FLAGS_RSS;
2428 en_flags |= BE_IF_FLAGS_RSS;
2429 }
2430 }
2431
2432 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2433 netdev->dev_addr, false/* pmac_invalid */,
2434 &adapter->if_handle, &adapter->pmac_id, 0);
2435 if (status != 0)
2436 goto do_none;
2437
2438 if (be_physfn(adapter)) {
2439 if (adapter->sriov_enabled) {
2440 while (vf < num_vfs) {
2441 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2442 BE_IF_FLAGS_BROADCAST;
2443 status = be_cmd_if_create(adapter, cap_flags,
2444 en_flags, mac, true,
2445 &adapter->vf_cfg[vf].vf_if_handle,
2446 NULL, vf+1);
2447 if (status) {
2448 dev_err(&adapter->pdev->dev,
2449 "Interface Create failed for VF %d\n",
2450 vf);
2451 goto if_destroy;
2452 }
2453 adapter->vf_cfg[vf].vf_pmac_id =
2454 BE_INVALID_PMAC_ID;
2455 vf++;
2456 }
2457 }
2458 } else {
2459 status = be_cmd_mac_addr_query(adapter, mac,
2460 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2461 if (!status) {
2462 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2463 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2464 }
2465 }
2466
2467 status = be_tx_queues_create(adapter);
2468 if (status != 0)
2469 goto if_destroy;
2470
2471 status = be_rx_queues_create(adapter);
2472 if (status != 0)
2473 goto tx_qs_destroy;
2474
2475 status = be_mcc_queues_create(adapter);
2476 if (status != 0)
2477 goto rx_qs_destroy;
2478
2479 adapter->link_speed = -1;
2480
2481 return 0;
2482
2483 rx_qs_destroy:
2484 be_rx_queues_destroy(adapter);
2485 tx_qs_destroy:
2486 be_tx_queues_destroy(adapter);
2487 if_destroy:
2488 if (be_physfn(adapter) && adapter->sriov_enabled)
2489 for (vf = 0; vf < num_vfs; vf++)
2490 if (adapter->vf_cfg[vf].vf_if_handle)
2491 be_cmd_if_destroy(adapter,
2492 adapter->vf_cfg[vf].vf_if_handle,
2493 vf + 1);
2494 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2495 do_none:
2496 return status;
2497 }
2498
2499 static int be_clear(struct be_adapter *adapter)
2500 {
2501 int vf;
2502
2503 if (be_physfn(adapter) && adapter->sriov_enabled)
2504 be_vf_eth_addr_rem(adapter);
2505
2506 be_mcc_queues_destroy(adapter);
2507 be_rx_queues_destroy(adapter);
2508 be_tx_queues_destroy(adapter);
2509 adapter->eq_next_idx = 0;
2510
2511 if (be_physfn(adapter) && adapter->sriov_enabled)
2512 for (vf = 0; vf < num_vfs; vf++)
2513 if (adapter->vf_cfg[vf].vf_if_handle)
2514 be_cmd_if_destroy(adapter,
2515 adapter->vf_cfg[vf].vf_if_handle,
2516 vf + 1);
2517
2518 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2519
2520 /* tell fw we're done with firing cmds */
2521 be_cmd_fw_clean(adapter);
2522 return 0;
2523 }
2524
2525
2526 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2527 static bool be_flash_redboot(struct be_adapter *adapter,
2528 const u8 *p, u32 img_start, int image_size,
2529 int hdr_size)
2530 {
2531 u32 crc_offset;
2532 u8 flashed_crc[4];
2533 int status;
2534
2535 crc_offset = hdr_size + img_start + image_size - 4;
2536
2537 p += crc_offset;
2538
2539 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2540 (image_size - 4));
2541 if (status) {
2542 dev_err(&adapter->pdev->dev,
2543 "could not get crc from flash, not flashing redboot\n");
2544 return false;
2545 }
2546
2547 /* update redboot only if crc does not match */
2548 if (!memcmp(flashed_crc, p, 4))
2549 return false;
2550 else
2551 return true;
2552 }
2553
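/* Walk the per-generation flash layout table and write each applicable UFI
 * component to its flash region in 32KB chunks, skipping components that do
 * not apply (NCSI on older firmware, redboot whose CRC already matches).
 */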
2554 static int be_flash_data(struct be_adapter *adapter,
2555 const struct firmware *fw,
2556 struct be_dma_mem *flash_cmd, int num_of_images)
2557
2558 {
2559 int status = 0, i, filehdr_size = 0;
2560 u32 total_bytes = 0, flash_op;
2561 int num_bytes;
2562 const u8 *p = fw->data;
2563 struct be_cmd_write_flashrom *req = flash_cmd->va;
2564 const struct flash_comp *pflashcomp;
2565 int num_comp;
2566
2567 static const struct flash_comp gen3_flash_types[9] = {
2568 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2569 FLASH_IMAGE_MAX_SIZE_g3},
2570 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2571 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2572 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2573 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2574 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2575 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2576 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2577 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2578 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2579 FLASH_IMAGE_MAX_SIZE_g3},
2580 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2581 FLASH_IMAGE_MAX_SIZE_g3},
2582 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2583 FLASH_IMAGE_MAX_SIZE_g3},
2584 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2585 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2586 };
2587 static const struct flash_comp gen2_flash_types[8] = {
2588 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2589 FLASH_IMAGE_MAX_SIZE_g2},
2590 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2591 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2592 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2593 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2594 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2595 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2596 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2597 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2598 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2599 FLASH_IMAGE_MAX_SIZE_g2},
2600 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2601 FLASH_IMAGE_MAX_SIZE_g2},
2602 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2603 FLASH_IMAGE_MAX_SIZE_g2}
2604 };
2605
2606 if (adapter->generation == BE_GEN3) {
2607 pflashcomp = gen3_flash_types;
2608 filehdr_size = sizeof(struct flash_file_hdr_g3);
2609 num_comp = ARRAY_SIZE(gen3_flash_types);
2610 } else {
2611 pflashcomp = gen2_flash_types;
2612 filehdr_size = sizeof(struct flash_file_hdr_g2);
2613 num_comp = ARRAY_SIZE(gen2_flash_types);
2614 }
2615 for (i = 0; i < num_comp; i++) {
2616 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2617 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2618 continue;
2619 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2620 (!be_flash_redboot(adapter, fw->data,
2621 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2622 (num_of_images * sizeof(struct image_hdr)))))
2623 continue;
2624 p = fw->data;
2625 p += filehdr_size + pflashcomp[i].offset
2626 + (num_of_images * sizeof(struct image_hdr));
2627 if (p + pflashcomp[i].size > fw->data + fw->size)
2628 return -1;
2629 total_bytes = pflashcomp[i].size;
2630 while (total_bytes) {
2631 if (total_bytes > 32*1024)
2632 num_bytes = 32*1024;
2633 else
2634 num_bytes = total_bytes;
2635 total_bytes -= num_bytes;
2636
2637 if (!total_bytes)
2638 flash_op = FLASHROM_OPER_FLASH;
2639 else
2640 flash_op = FLASHROM_OPER_SAVE;
2641 memcpy(req->params.data_buf, p, num_bytes);
2642 p += num_bytes;
2643 status = be_cmd_write_flashrom(adapter, flash_cmd,
2644 pflashcomp[i].optype, flash_op, num_bytes);
2645 if (status) {
2646 dev_err(&adapter->pdev->dev,
2647 "cmd to write to flash rom failed.\n");
2648 return -1;
2649 }
2650 yield();
2651 }
2652 }
2653 return 0;
2654 }
2655
2656 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2657 {
2658 if (fhdr == NULL)
2659 return 0;
2660 if (fhdr->build[0] == '3')
2661 return BE_GEN3;
2662 else if (fhdr->build[0] == '2')
2663 return BE_GEN2;
2664 else
2665 return 0;
2666 }
2667
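/* Load the named UFI file via request_firmware(), check that it matches the
 * adapter generation, and flash it component by component.
 */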
2668 int be_load_fw(struct be_adapter *adapter, u8 *func)
2669 {
2670 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2671 const struct firmware *fw;
2672 struct flash_file_hdr_g2 *fhdr;
2673 struct flash_file_hdr_g3 *fhdr3;
2674 struct image_hdr *img_hdr_ptr = NULL;
2675 struct be_dma_mem flash_cmd;
2676 int status, i = 0, num_imgs = 0;
2677 const u8 *p;
2678
2679 if (!netif_running(adapter->netdev)) {
2680 dev_err(&adapter->pdev->dev,
2681 "Firmware load not allowed (interface is down)\n");
2682 return -EPERM;
2683 }
2684
2685 strcpy(fw_file, func);
2686
2687 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2688 if (status)
2689 goto fw_exit;
2690
2691 p = fw->data;
2692 fhdr = (struct flash_file_hdr_g2 *) p;
2693 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2694
2695 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2696 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2697 &flash_cmd.dma, GFP_KERNEL);
2698 if (!flash_cmd.va) {
2699 status = -ENOMEM;
2700 dev_err(&adapter->pdev->dev,
2701 "Memory allocation failure while flashing\n");
2702 goto fw_exit;
2703 }
2704
2705 if ((adapter->generation == BE_GEN3) &&
2706 (get_ufigen_type(fhdr) == BE_GEN3)) {
2707 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2708 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2709 for (i = 0; i < num_imgs; i++) {
2710 img_hdr_ptr = (struct image_hdr *) (fw->data +
2711 (sizeof(struct flash_file_hdr_g3) +
2712 i * sizeof(struct image_hdr)));
2713 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2714 status = be_flash_data(adapter, fw, &flash_cmd,
2715 num_imgs);
2716 }
2717 } else if ((adapter->generation == BE_GEN2) &&
2718 (get_ufigen_type(fhdr) == BE_GEN2)) {
2719 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2720 } else {
2721 dev_err(&adapter->pdev->dev,
2722 "UFI and Interface are not compatible for flashing\n");
2723 status = -1;
2724 }
2725
2726 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2727 flash_cmd.dma);
2728 if (status) {
2729 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2730 goto fw_exit;
2731 }
2732
2733 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2734
2735 fw_exit:
2736 release_firmware(fw);
2737 return status;
2738 }
2739
2740 static struct net_device_ops be_netdev_ops = {
2741 .ndo_open = be_open,
2742 .ndo_stop = be_close,
2743 .ndo_start_xmit = be_xmit,
2744 .ndo_set_rx_mode = be_set_multicast_list,
2745 .ndo_set_mac_address = be_mac_addr_set,
2746 .ndo_change_mtu = be_change_mtu,
2747 .ndo_validate_addr = eth_validate_addr,
2748 .ndo_vlan_rx_register = be_vlan_register,
2749 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2750 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2751 .ndo_set_vf_mac = be_set_vf_mac,
2752 .ndo_set_vf_vlan = be_set_vf_vlan,
2753 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2754 .ndo_get_vf_config = be_get_vf_config
2755 };
2756
2757 static void be_netdev_init(struct net_device *netdev)
2758 {
2759 struct be_adapter *adapter = netdev_priv(netdev);
2760 struct be_rx_obj *rxo;
2761 int i;
2762
2763 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2764 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2765 NETIF_F_HW_VLAN_TX;
2766 if (be_multi_rxq(adapter))
2767 netdev->hw_features |= NETIF_F_RXHASH;
2768
2769 netdev->features |= netdev->hw_features |
2770 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2771
2772 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2773 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2774
2775 if (lancer_chip(adapter))
2776 netdev->vlan_features |= NETIF_F_TSO6;
2777
2778 netdev->flags |= IFF_MULTICAST;
2779
2780 /* Default settings for Rx and Tx flow control */
2781 adapter->rx_fc = true;
2782 adapter->tx_fc = true;
2783
2784 netif_set_gso_max_size(netdev, 65535);
2785
2786 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2787
2788 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2789
2790 for_all_rx_queues(adapter, rxo, i)
2791 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2792 BE_NAPI_WEIGHT);
2793
2794 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2795 BE_NAPI_WEIGHT);
2796 }
2797
2798 static void be_unmap_pci_bars(struct be_adapter *adapter)
2799 {
2800 if (adapter->csr)
2801 iounmap(adapter->csr);
2802 if (adapter->db)
2803 iounmap(adapter->db);
2804 if (adapter->pcicfg && be_physfn(adapter))
2805 iounmap(adapter->pcicfg);
2806 }
2807
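/* ioremap the BARs this chip needs: only the doorbell BAR on Lancer. On
 * BE2/BE3 the CSR and PCI-config BARs are mapped on the PF only; VFs reach
 * their config space through an offset within the doorbell BAR.
 */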
2808 static int be_map_pci_bars(struct be_adapter *adapter)
2809 {
2810 u8 __iomem *addr;
2811 int pcicfg_reg, db_reg;
2812
2813 if (lancer_chip(adapter)) {
2814 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2815 pci_resource_len(adapter->pdev, 0));
2816 if (addr == NULL)
2817 return -ENOMEM;
2818 adapter->db = addr;
2819 return 0;
2820 }
2821
2822 if (be_physfn(adapter)) {
2823 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2824 pci_resource_len(adapter->pdev, 2));
2825 if (addr == NULL)
2826 return -ENOMEM;
2827 adapter->csr = addr;
2828 }
2829
2830 if (adapter->generation == BE_GEN2) {
2831 pcicfg_reg = 1;
2832 db_reg = 4;
2833 } else {
2834 pcicfg_reg = 0;
2835 if (be_physfn(adapter))
2836 db_reg = 4;
2837 else
2838 db_reg = 0;
2839 }
2840 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2841 pci_resource_len(adapter->pdev, db_reg));
2842 if (addr == NULL)
2843 goto pci_map_err;
2844 adapter->db = addr;
2845
2846 if (be_physfn(adapter)) {
2847 addr = ioremap_nocache(
2848 pci_resource_start(adapter->pdev, pcicfg_reg),
2849 pci_resource_len(adapter->pdev, pcicfg_reg));
2850 if (addr == NULL)
2851 goto pci_map_err;
2852 adapter->pcicfg = addr;
2853 } else
2854 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2855
2856 return 0;
2857 pci_map_err:
2858 be_unmap_pci_bars(adapter);
2859 return -ENOMEM;
2860 }
2861
2862
2863 static void be_ctrl_cleanup(struct be_adapter *adapter)
2864 {
2865 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2866
2867 be_unmap_pci_bars(adapter);
2868
2869 if (mem->va)
2870 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2871 mem->dma);
2872
2873 mem = &adapter->mc_cmd_mem;
2874 if (mem->va)
2875 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2876 mem->dma);
2877 }
2878
2879 static int be_ctrl_init(struct be_adapter *adapter)
2880 {
2881 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2882 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2883 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2884 int status;
2885
2886 status = be_map_pci_bars(adapter);
2887 if (status)
2888 goto done;
2889
2890 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2891 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2892 mbox_mem_alloc->size,
2893 &mbox_mem_alloc->dma,
2894 GFP_KERNEL);
2895 if (!mbox_mem_alloc->va) {
2896 status = -ENOMEM;
2897 goto unmap_pci_bars;
2898 }
2899
2900 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2901 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2902 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2903 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2904
2905 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2906 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2907 mc_cmd_mem->size, &mc_cmd_mem->dma,
2908 GFP_KERNEL);
2909 if (mc_cmd_mem->va == NULL) {
2910 status = -ENOMEM;
2911 goto free_mbox;
2912 }
2913 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2914
2915 mutex_init(&adapter->mbox_lock);
2916 spin_lock_init(&adapter->mcc_lock);
2917 spin_lock_init(&adapter->mcc_cq_lock);
2918
2919 init_completion(&adapter->flash_compl);
2920 pci_save_state(adapter->pdev);
2921 return 0;
2922
2923 free_mbox:
2924 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2925 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2926
2927 unmap_pci_bars:
2928 be_unmap_pci_bars(adapter);
2929
2930 done:
2931 return status;
2932 }
2933
2934 static void be_stats_cleanup(struct be_adapter *adapter)
2935 {
2936 struct be_dma_mem *cmd = &adapter->stats_cmd;
2937
2938 if (cmd->va)
2939 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2940 cmd->va, cmd->dma);
2941 }
2942
2943 static int be_stats_init(struct be_adapter *adapter)
2944 {
2945 struct be_dma_mem *cmd = &adapter->stats_cmd;
2946
2947 if (adapter->generation == BE_GEN2)
2948 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
2949 else
2950 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
2951 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2952 GFP_KERNEL);
2953 if (cmd->va == NULL)
2954 return -1;
2955 memset(cmd->va, 0, cmd->size);
2956 return 0;
2957 }
2958
2959 static void __devexit be_remove(struct pci_dev *pdev)
2960 {
2961 struct be_adapter *adapter = pci_get_drvdata(pdev);
2962
2963 if (!adapter)
2964 return;
2965
2966 cancel_delayed_work_sync(&adapter->work);
2967
2968 unregister_netdev(adapter->netdev);
2969
2970 be_clear(adapter);
2971
2972 be_stats_cleanup(adapter);
2973
2974 be_ctrl_cleanup(adapter);
2975
2976 kfree(adapter->vf_cfg);
2977 be_sriov_disable(adapter);
2978
2979 be_msix_disable(adapter);
2980
2981 pci_set_drvdata(pdev, NULL);
2982 pci_release_regions(pdev);
2983 pci_disable_device(pdev);
2984
2985 free_netdev(adapter->netdev);
2986 }
2987
2988 static int be_get_config(struct be_adapter *adapter)
2989 {
2990 int status;
2991 u8 mac[ETH_ALEN];
2992
2993 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2994 if (status)
2995 return status;
2996
2997 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2998 &adapter->function_mode, &adapter->function_caps);
2999 if (status)
3000 return status;
3001
3002 memset(mac, 0, ETH_ALEN);
3003
3004 if (be_physfn(adapter)) {
3005 status = be_cmd_mac_addr_query(adapter, mac,
3006 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3007
3008 if (status)
3009 return status;
3010
3011 if (!is_valid_ether_addr(mac))
3012 return -EADDRNOTAVAIL;
3013
3014 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3015 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3016 }
3017
3018 if (adapter->function_mode & 0x400)
3019 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3020 else
3021 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3022
3023 status = be_cmd_get_cntl_attributes(adapter);
3024 if (status)
3025 return status;
3026
3027 be_cmd_check_native_mode(adapter);
3028 return 0;
3029 }
3030
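/* Derive the adapter generation (BE2, BE3 or Lancer SLI family) from the
 * PCI device ID and, for Lancer, validate the SLI_INTF register.
 */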
3031 static int be_dev_family_check(struct be_adapter *adapter)
3032 {
3033 struct pci_dev *pdev = adapter->pdev;
3034 u32 sli_intf = 0, if_type;
3035
3036 switch (pdev->device) {
3037 case BE_DEVICE_ID1:
3038 case OC_DEVICE_ID1:
3039 adapter->generation = BE_GEN2;
3040 break;
3041 case BE_DEVICE_ID2:
3042 case OC_DEVICE_ID2:
3043 adapter->generation = BE_GEN3;
3044 break;
3045 case OC_DEVICE_ID3:
3046 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3047 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3048 SLI_INTF_IF_TYPE_SHIFT;
3049
3050 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3051 if_type != 0x02) {
3052 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3053 return -EINVAL;
3054 }
3055 if (num_vfs > 0) {
3056 dev_err(&pdev->dev, "VFs not supported\n");
3057 return -EINVAL;
3058 }
3059 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3060 SLI_INTF_FAMILY_SHIFT);
3061 adapter->generation = BE_GEN3;
3062 break;
3063 default:
3064 adapter->generation = 0;
3065 }
3066 return 0;
3067 }
3068
3069 static int lancer_wait_ready(struct be_adapter *adapter)
3070 {
3071 #define SLIPORT_READY_TIMEOUT 500
3072 u32 sliport_status;
3073 int status = 0, i;
3074
3075 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3076 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3077 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3078 break;
3079
3080 msleep(20);
3081 }
3082
3083 if (i == SLIPORT_READY_TIMEOUT)
3084 status = -1;
3085
3086 return status;
3087 }
3088
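/* Wait for the Lancer SLIPORT to report ready; if it reports an error that
 * needs a reset, trigger the reset and wait again for a clean ready state.
 */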
3089 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3090 {
3091 int status;
3092 u32 sliport_status, err, reset_needed;
3093 status = lancer_wait_ready(adapter);
3094 if (!status) {
3095 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3096 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3097 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3098 if (err && reset_needed) {
3099 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3100 adapter->db + SLIPORT_CONTROL_OFFSET);
3101
3102 /* check adapter has corrected the error */
3103 status = lancer_wait_ready(adapter);
3104 sliport_status = ioread32(adapter->db +
3105 SLIPORT_STATUS_OFFSET);
3106 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3107 SLIPORT_STATUS_RN_MASK);
3108 if (status || sliport_status)
3109 status = -1;
3110 } else if (err || reset_needed) {
3111 status = -1;
3112 }
3113 }
3114 return status;
3115 }
3116
3117 static int __devinit be_probe(struct pci_dev *pdev,
3118 const struct pci_device_id *pdev_id)
3119 {
3120 int status = 0;
3121 struct be_adapter *adapter;
3122 struct net_device *netdev;
3123
3124 status = pci_enable_device(pdev);
3125 if (status)
3126 goto do_none;
3127
3128 status = pci_request_regions(pdev, DRV_NAME);
3129 if (status)
3130 goto disable_dev;
3131 pci_set_master(pdev);
3132
3133 netdev = alloc_etherdev(sizeof(struct be_adapter));
3134 if (netdev == NULL) {
3135 status = -ENOMEM;
3136 goto rel_reg;
3137 }
3138 adapter = netdev_priv(netdev);
3139 adapter->pdev = pdev;
3140 pci_set_drvdata(pdev, adapter);
3141
3142 status = be_dev_family_check(adapter);
3143 if (status)
3144 goto free_netdev;
3145
3146 adapter->netdev = netdev;
3147 SET_NETDEV_DEV(netdev, &pdev->dev);
3148
3149 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3150 if (!status) {
3151 netdev->features |= NETIF_F_HIGHDMA;
3152 } else {
3153 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3154 if (status) {
3155 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3156 goto free_netdev;
3157 }
3158 }
3159
3160 be_sriov_enable(adapter);
3161 if (adapter->sriov_enabled) {
3162 adapter->vf_cfg = kcalloc(num_vfs,
3163 sizeof(struct be_vf_cfg), GFP_KERNEL);
3164
3165 if (!adapter->vf_cfg)
3166 goto free_netdev;
3167 }
3168
3169 status = be_ctrl_init(adapter);
3170 if (status)
3171 goto free_vf_cfg;
3172
3173 if (lancer_chip(adapter)) {
3174 status = lancer_test_and_set_rdy_state(adapter);
3175 if (status) {
3176 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3177 goto ctrl_clean;
3178 }
3179 }
3180
3181 /* sync up with fw's ready state */
3182 if (be_physfn(adapter)) {
3183 status = be_cmd_POST(adapter);
3184 if (status)
3185 goto ctrl_clean;
3186 }
3187
3188 /* tell fw we're ready to fire cmds */
3189 status = be_cmd_fw_init(adapter);
3190 if (status)
3191 goto ctrl_clean;
3192
3193 status = be_cmd_reset_function(adapter);
3194 if (status)
3195 goto ctrl_clean;
3196
3197 status = be_stats_init(adapter);
3198 if (status)
3199 goto ctrl_clean;
3200
3201 status = be_get_config(adapter);
3202 if (status)
3203 goto stats_clean;
3204
3205 be_msix_enable(adapter);
3206
3207 INIT_DELAYED_WORK(&adapter->work, be_worker);
3208
3209 status = be_setup(adapter);
3210 if (status)
3211 goto msix_disable;
3212
3213 be_netdev_init(netdev);
3214 status = register_netdev(netdev);
3215 if (status != 0)
3216 goto unsetup;
3217 netif_carrier_off(netdev);
3218
3219 if (be_physfn(adapter) && adapter->sriov_enabled) {
3220 u8 mac_speed;
3221 bool link_up;
3222 u16 vf, lnk_speed;
3223
3224 status = be_vf_eth_addr_config(adapter);
3225 if (status)
3226 goto unreg_netdev;
3227
3228 for (vf = 0; vf < num_vfs; vf++) {
3229 status = be_cmd_link_status_query(adapter, &link_up,
3230 &mac_speed, &lnk_speed, vf + 1);
3231 if (!status)
3232 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3233 else
3234 goto unreg_netdev;
3235 }
3236 }
3237
3238 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3239 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3240 return 0;
3241
3242 unreg_netdev:
3243 unregister_netdev(netdev);
3244 unsetup:
3245 be_clear(adapter);
3246 msix_disable:
3247 be_msix_disable(adapter);
3248 stats_clean:
3249 be_stats_cleanup(adapter);
3250 ctrl_clean:
3251 be_ctrl_cleanup(adapter);
3252 free_vf_cfg:
3253 kfree(adapter->vf_cfg);
3254 free_netdev:
3255 be_sriov_disable(adapter);
3256 free_netdev(netdev);
3257 pci_set_drvdata(pdev, NULL);
3258 rel_reg:
3259 pci_release_regions(pdev);
3260 disable_dev:
3261 pci_disable_device(pdev);
3262 do_none:
3263 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3264 return status;
3265 }
3266
3267 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3268 {
3269 struct be_adapter *adapter = pci_get_drvdata(pdev);
3270 struct net_device *netdev = adapter->netdev;
3271
3272 cancel_delayed_work_sync(&adapter->work);
3273 if (adapter->wol)
3274 be_setup_wol(adapter, true);
3275
3276 netif_device_detach(netdev);
3277 if (netif_running(netdev)) {
3278 rtnl_lock();
3279 be_close(netdev);
3280 rtnl_unlock();
3281 }
3282 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3283 be_clear(adapter);
3284
3285 be_msix_disable(adapter);
3286 pci_save_state(pdev);
3287 pci_disable_device(pdev);
3288 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3289 return 0;
3290 }
3291
3292 static int be_resume(struct pci_dev *pdev)
3293 {
3294 int status = 0;
3295 struct be_adapter *adapter = pci_get_drvdata(pdev);
3296 struct net_device *netdev = adapter->netdev;
3297
3298 netif_device_detach(netdev);
3299
3300 status = pci_enable_device(pdev);
3301 if (status)
3302 return status;
3303
3304 pci_set_power_state(pdev, 0);
3305 pci_restore_state(pdev);
3306
3307 be_msix_enable(adapter);
3308 /* tell fw we're ready to fire cmds */
3309 status = be_cmd_fw_init(adapter);
3310 if (status)
3311 return status;
3312
3313 be_setup(adapter);
3314 if (netif_running(netdev)) {
3315 rtnl_lock();
3316 be_open(netdev);
3317 rtnl_unlock();
3318 }
3319 netif_device_attach(netdev);
3320
3321 if (adapter->wol)
3322 be_setup_wol(adapter, false);
3323
3324 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3325 return 0;
3326 }
3327
3328 /*
3329 * An FLR will stop BE from DMAing any data.
3330 */
3331 static void be_shutdown(struct pci_dev *pdev)
3332 {
3333 struct be_adapter *adapter = pci_get_drvdata(pdev);
3334
3335 if (!adapter)
3336 return;
3337
3338 cancel_delayed_work_sync(&adapter->work);
3339
3340 netif_device_detach(adapter->netdev);
3341
3342 if (adapter->wol)
3343 be_setup_wol(adapter, true);
3344
3345 be_cmd_reset_function(adapter);
3346
3347 pci_disable_device(pdev);
3348 }
3349
3350 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3351 pci_channel_state_t state)
3352 {
3353 struct be_adapter *adapter = pci_get_drvdata(pdev);
3354 struct net_device *netdev = adapter->netdev;
3355
3356 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3357
3358 adapter->eeh_err = true;
3359
3360 netif_device_detach(netdev);
3361
3362 if (netif_running(netdev)) {
3363 rtnl_lock();
3364 be_close(netdev);
3365 rtnl_unlock();
3366 }
3367 be_clear(adapter);
3368
3369 if (state == pci_channel_io_perm_failure)
3370 return PCI_ERS_RESULT_DISCONNECT;
3371
3372 pci_disable_device(pdev);
3373
3374 return PCI_ERS_RESULT_NEED_RESET;
3375 }
3376
3377 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3378 {
3379 struct be_adapter *adapter = pci_get_drvdata(pdev);
3380 int status;
3381
3382 dev_info(&adapter->pdev->dev, "EEH reset\n");
3383 adapter->eeh_err = false;
3384
3385 status = pci_enable_device(pdev);
3386 if (status)
3387 return PCI_ERS_RESULT_DISCONNECT;
3388
3389 pci_set_master(pdev);
3390 pci_set_power_state(pdev, 0);
3391 pci_restore_state(pdev);
3392
3393 /* Check if card is ok and fw is ready */
3394 status = be_cmd_POST(adapter);
3395 if (status)
3396 return PCI_ERS_RESULT_DISCONNECT;
3397
3398 return PCI_ERS_RESULT_RECOVERED;
3399 }
3400
3401 static void be_eeh_resume(struct pci_dev *pdev)
3402 {
3403 int status = 0;
3404 struct be_adapter *adapter = pci_get_drvdata(pdev);
3405 struct net_device *netdev = adapter->netdev;
3406
3407 dev_info(&adapter->pdev->dev, "EEH resume\n");
3408
3409 pci_save_state(pdev);
3410
3411 /* tell fw we're ready to fire cmds */
3412 status = be_cmd_fw_init(adapter);
3413 if (status)
3414 goto err;
3415
3416 status = be_setup(adapter);
3417 if (status)
3418 goto err;
3419
3420 if (netif_running(netdev)) {
3421 status = be_open(netdev);
3422 if (status)
3423 goto err;
3424 }
3425 netif_device_attach(netdev);
3426 return;
3427 err:
3428 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3429 }
3430
3431 static struct pci_error_handlers be_eeh_handlers = {
3432 .error_detected = be_eeh_err_detected,
3433 .slot_reset = be_eeh_reset,
3434 .resume = be_eeh_resume,
3435 };
3436
3437 static struct pci_driver be_driver = {
3438 .name = DRV_NAME,
3439 .id_table = be_dev_ids,
3440 .probe = be_probe,
3441 .remove = be_remove,
3442 .suspend = be_suspend,
3443 .resume = be_resume,
3444 .shutdown = be_shutdown,
3445 .err_handler = &be_eeh_handlers
3446 };
3447
3448 static int __init be_init_module(void)
3449 {
3450 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3451 rx_frag_size != 2048) {
3452 printk(KERN_WARNING DRV_NAME
3453 " : Module param rx_frag_size must be 2048/4096/8192."
3454 " Using 2048\n");
3455 rx_frag_size = 2048;
3456 }
3457
3458 return pci_register_driver(&be_driver);
3459 }
3460 module_init(be_init_module);
3461
3462 static void __exit be_exit_module(void)
3463 {
3464 pci_unregister_driver(&be_driver);
3465 }
3466 module_exit(be_exit_module);