/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

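/* Queue rings live in DMA-coherent memory; these helpers allocate and
 * free that backing store.
 */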
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}

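/* Enable or disable host interrupts by flipping the HOSTINTR bit in
 * the membar interrupt-control register; if the bit already matches
 * the requested state, just warn and leave the register untouched.
 */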
static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
{
	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (!enabled && enable) {
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	} else if (enabled && !enable) {
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	} else {
		printk(KERN_WARNING DRV_NAME
			": bad value in membar_int_ctrl reg=0x%x\n", reg);
		return;
	}
	iowrite32(reg, addr);
}

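/* The four helpers below ring the RQ/TXQ/EQ/CQ doorbells to tell the
 * hardware how many entries the driver posted or consumed, and (for
 * event/completion queues) whether to re-arm them.
 */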
static void be_rxq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, ctrl->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_ctrl_info *ctrl, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, ctrl->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, ctrl->db + DB_EQ_OFFSET);
}

static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
		bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, ctrl->db + DB_CQ_OFFSET);
}

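/* Set a new MAC address: on a running interface, the old pmac entry
 * is deleted from the hardware before the new one is added.
 */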
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (netif_running(netdev)) {
		status = be_cmd_pmac_del(&adapter->ctrl, adapter->if_handle,
				adapter->pmac_id);
		if (status)
			return status;

		status = be_cmd_pmac_add(&adapter->ctrl, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id);
	}

	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

static void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->stats.net_stats;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space in linux buffers */
	dev_stats->rx_dropped = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->tx_multicastframes;
	dev_stats->collisions = 0;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors + port_stats->rx_frame_too_long;
	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;
	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;
	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}

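/* Query the current link state and, on a speed transition to or from
 * zero, toggle the carrier and the netdev queue accordingly.
 */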
static void be_link_status_update(struct be_adapter *adapter)
{
	struct be_link_info *prev = &adapter->link;
	struct be_link_info now = { 0 };
	struct net_device *netdev = adapter->netdev;

	be_cmd_link_status_query(&adapter->ctrl, &now);

	/* If link came up or went down */
	if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
			prev->speed == PHY_LINK_SPEED_ZERO)) {
		if (now.speed == PHY_LINK_SPEED_ZERO) {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		} else {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		}
	}
	*prev = now;
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(ctrl, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);

	return &adapter->stats.net_stats;
}

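/* Convert a byte count accumulated over a jiffies interval into
 * Mbits/sec, using do_div() so the 64-bit math also works on 32-bit
 * hosts.
 */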
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = 0;
	while (skb) {
		if (skb->len > skb->data_len)
			cnt++;
		cnt += skb_shinfo(skb)->nr_frags;
		skb = skb_shinfo(skb)->frag_list;
	}
	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

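/* Fill the header WRB: request LSO for GSO skbs, TCP/UDP checksum
 * offload for CHECKSUM_PARTIAL skbs, and VLAN tag insertion when a
 * tag is present.
 */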
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

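/* DMA-map the skb's linear data and page frags, writing one WRB per
 * mapping into the TX ring; the header WRB reserved at the ring head
 * is filled last. Returns the number of bytes queued.
 */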
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	atomic_add(wrb_cnt, &txq->used);
	hdr = queue_head_node(txq);
	queue_head_inc(txq);

	while (skb) {
		if (skb->len > skb->data_len) {
			int len = skb->len - skb->data_len;
			busaddr = pci_map_single(pdev, skb->data, len,
					PCI_DMA_TODEVICE);
			wrb = queue_head_node(txq);
			wrb_fill(wrb, busaddr, len);
			be_dws_cpu_to_le(wrb, sizeof(*wrb));
			queue_head_inc(txq);
			copied += len;
		}

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[i];
			busaddr = pci_map_page(pdev, frag->page,
					frag->page_offset,
					frag->size, PCI_DMA_TODEVICE);
			wrb = queue_head_node(txq);
			wrb_fill(wrb, busaddr, frag->size);
			be_dws_cpu_to_le(wrb, sizeof(*wrb));
			queue_head_inc(txq);
			copied += frag->size;
		}
		skb = skb_shinfo(skb)->frag_list;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}

static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);

	/* record the sent skb in the sent_skb table */
	BUG_ON(tx_obj->sent_skb_list[start]);
	tx_obj->sent_skb_list[start] = skb;

	/* Ensure that txq has space for the next skb; Else stop the queue
	 * *BEFORE* ringing the tx doorbell, so that we serialize the
	 * tx compls of the current transmit which'll wake up the queue
	 */
	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
		netif_stop_queue(netdev);
		stopped = true;
	}

	be_txq_notify(&adapter->ctrl, txq->id, wrb_cnt);

	netdev->trans_start = jiffies;

	be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * If there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are
 * configured, set the BE in promiscuous VLAN mode.
 */
static void be_vid_config(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;

	if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
			vtag, ntags, 1, 0);
	} else {
		be_cmd_vlan_config(&adapter->ctrl, adapter->if_handle,
			NULL, 0, 1, 1);
	}
}

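/* The event queues are briefly unarmed around the vlan_grp update,
 * presumably so completion processing does not race with the pointer
 * change.
 */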
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_ctrl_info *ctrl = &adapter->ctrl;

	be_eq_notify(ctrl, rx_eq->q.id, false, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans++;
	adapter->vlan_tag[vid] = 1;

	be_vid_config(netdev);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans--;
	adapter->vlan_tag[vid] = 0;

	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	be_vid_config(netdev);
}

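/* Program the hardware multicast filter in batches of 32 addresses;
 * with IFF_ALLMULTI set, the interface is put in multicast-promiscuous
 * mode instead.
 */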
static void be_set_multicast_filter(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct dev_mc_list *mc_ptr;
	u8 mac_addr[32][ETH_ALEN];
	int i = 0;

	if (netdev->flags & IFF_ALLMULTI) {
		/* set BE in Multicast promiscuous */
		be_cmd_mcast_mac_set(&adapter->ctrl,
			adapter->if_handle, NULL, 0, true);
		return;
	}

	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
		memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
		if (++i >= 32) {
			be_cmd_mcast_mac_set(&adapter->ctrl,
				adapter->if_handle, &mac_addr[0][0], i, false);
			i = 0;
		}
	}

	if (i) {
		/* reset the promiscuous mode also. */
		be_cmd_mcast_mac_set(&adapter->ctrl,
			adapter->if_handle, &mac_addr[0][0], i, false);
	}
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
	} else {
		be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
		be_set_multicast_filter(netdev);
	}
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
		u32 pktsize, u16 numfrags)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
}

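/* Look up the page_info of a posted RX frag and unmap the backing
 * page if this frag was its last user.
 */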
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user)
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;
	u32 pktsize, hdr_len, curr_frag_len;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	memset(page_info, 0, sizeof(*page_info));

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	pktsize -= curr_frag_len; /* account for above copied frag */
	for (i = 1; i < num_rcvd; i++) {
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(pktsize, rx_frag_size);

		skb_shinfo(skb)->frags[i].page = page_info->page;
		skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
		skb_shinfo(skb)->frags[i].size = curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb_shinfo(skb)->nr_frags++;
		pktsize -= curr_frag_len;

		memset(page_info, 0, sizeof(*page_info));
	}

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
	return;
}

/* Process the RX completion indicated by rxcp when LRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vtp, vid;
	int l4_cksm;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);

	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
	if (!skb) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);

	skb_fill_rx_data(adapter, skb, rxcp);

	if (l4_cksm && adapter->rx_csum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	if (vtp) {
		if (!adapter->vlan_grp || adapter->num_vlans == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}

	adapter->netdev->last_rx = jiffies;

	return;
}

/* Process the RX completion indicated by rxcp when LRO is enabled */
static void be_rx_compl_process_lro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);

	remaining = pkt_size;
	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		rx_frags[i].page = page_info->page;
		rx_frags[i].page_offset = page_info->page_offset;
		rx_frags[i].size = curr_frag_len;
		remaining -= curr_frag_len;

		index_inc(&rxq_idx, rxq->len);

		memset(page_info, 0, sizeof(*page_info));
	}

	if (likely(!vlanf)) {
		lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
			pkt_size, NULL, 0);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->num_vlans == 0)
			return;

		lro_vlan_hwaccel_receive_frags(&adapter->rx_obj.lro_mgr,
			rx_frags, pkt_size, pkt_size, adapter->vlan_grp,
			vid, NULL, 0);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
	return;
}

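/* Pop the next valid RX completion: byte-swap it in place and clear
 * its valid bit so the ring entry can be reused; returns NULL when
 * the CQ is empty.
 */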
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it into fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
		queue_head_inc(rxq);

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(&adapter->ctrl, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}

	return;
}

static struct be_eth_tx_compl *
be_tx_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

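/* Walk the TX ring from the tail up to last_index, unmapping each
 * fragment and finally freeing the skb recorded for this request.
 */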
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		if (busaddr != 0) {
			pci_unmap_single(adapter->pdev, busaddr,
				wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_cq_notify(&adapter->ctrl, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; tail != rxq->head; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_q_clean(struct be_adapter *adapter)
{
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	u16 last_index;
	bool dummy_wrb;

	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		last_index = txq->tail;
		index_adv(&last_index,
			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
		be_tx_compl_process(adapter, last_index);
	}
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* No more tx completions can be rcvd now; clean up if there are
	 * any pending completions or pending tx requests */
	be_tx_q_clean(adapter);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

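/* Create the TX event queue, completion queue and eth queue, in that
 * order; on any failure the queues created so far are torn down.
 */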
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(&adapter->ctrl, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(&adapter->ctrl, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_RXQ);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(&adapter->ctrl, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(&adapter->ctrl, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(&adapter->ctrl, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(&adapter->ctrl, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(&adapter->ctrl, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}

static bool event_get(struct be_eq_obj *eq_obj, u16 *rid)
{
	struct be_eq_entry *entry = queue_tail_node(&eq_obj->q);
	u32 evt = entry->evt;

	if (!evt)
		return false;

	evt = le32_to_cpu(evt);
	*rid = (evt >> EQ_ENTRY_RES_ID_SHIFT) & EQ_ENTRY_RES_ID_MASK;
	entry->evt = 0;
	queue_tail_inc(&eq_obj->q);
	return true;
}

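/* Drain all pending entries from an event queue, re-arm it with the
 * number of events popped, and schedule NAPI if there was any work.
 */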
static int event_handle(struct be_ctrl_info *ctrl,
			struct be_eq_obj *eq_obj)
{
	u16 rid = 0, num = 0;

	while (event_get(eq_obj, &rid))
		num++;

	/* We can see an interrupt and no event */
	be_eq_notify(ctrl, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	int rx, tx;

	tx = event_handle(ctrl, &adapter->tx_eq);
	rx = event_handle(ctrl, &adapter->rx_eq);

	if (rx || tx)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(&adapter->ctrl, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(&adapter->ctrl, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_lro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return tcp_frame && !err && (adapter->max_rx_coal > 1);
}

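/* RX NAPI poll: handle up to budget completions via the LRO or
 * non-LRO path, refill the RX ring when it runs low, and re-arm the
 * CQ only once all pending work has been consumed.
 */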
int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_lro(adapter, rxcp))
			be_rx_compl_process_lro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);
	}

	lro_flush_all(&adapter->rx_obj.lro_mgr);

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(&adapter->ctrl, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(&adapter->ctrl, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* For TX we don't honour budget; consume everything */
int be_poll_tx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *tx_cq = &tx_obj->cq;
	struct be_queue_info *txq = &tx_obj->q;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(adapter))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	/* As Tx wrbs have been freed up, wake up netdev queue if
	 * it was stopped due to lack of tx wrbs.
	 */
	if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
		netif_wake_queue(adapter->netdev);
	}

	napi_complete(napi);

	be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);

	drvr_stats(adapter)->be_tx_events++;
	drvr_stats(adapter)->be_tx_compl += num_cmpl;

	return 1;
}

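/* Once-a-second housekeeping: link check, stats refresh, RX EQ delay
 * tuning, and replenishing the RX ring if posting had starved.
 */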
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	int status;

	/* Check link */
	be_link_status_update(adapter);

	/* Get Stats */
	status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
	if (!status)
		netdev_stats_update(adapter);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
	return;
}

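/* EQ ids appear to be handed out 8 per PCI function; map an eq_id
 * back to its MSI-X vector on that basis.
 */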
static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[eq_id -
			8 * adapter->ctrl.pci_func].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	int status, vec;

	sprintf(tx_eq->desc, "%s-tx", netdev->name);
	vec = be_msix_vec_get(adapter, tx_eq->q.id);
	status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
	if (status)
		goto err;

	sprintf(rx_eq->desc, "%s-rx", netdev->name);
	vec = be_msix_vec_get(adapter, rx_eq->q.id);
	status = request_irq(vec, be_msix_rx, 0, rx_eq->desc, adapter);
	if (status) { /* Free TX IRQ */
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		free_irq(vec, adapter);
		goto err;
	}
	return 0;
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	vec = be_msix_vec_get(adapter, adapter->tx_eq.q.id);
	free_irq(vec, adapter);
	vec = be_msix_vec_get(adapter, adapter->rx_eq.q.id);
	free_irq(vec, adapter);
done:
	adapter->isr_registered = false;
	return;
}

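/* ndo_open: create the interface and the TX/RX queues, post initial
 * RX buffers, register the IRQs, and finally arm the event and
 * completion queues.
 */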
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	u32 if_flags;
	int status;

	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
		BE_IF_FLAGS_PASS_L3L4_ERRORS;
	status = be_cmd_if_create(ctrl, if_flags, netdev->dev_addr,
			false/* pmac_invalid */, &adapter->if_handle,
			&adapter->pmac_id);
	if (status != 0)
		goto do_none;

	be_vid_config(netdev);

	status = be_cmd_set_flow_control(ctrl, true, true);
	if (status != 0)
		goto if_destroy;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(ctrl, true);

	/* The evt queues are created in the unarmed state; arm them */
	be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
	be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);

	/* The compl queues are created in the unarmed state; arm them */
	be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
	be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);

	be_link_status_update(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(ctrl, adapter->if_handle);
do_none:
	return status;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_ctrl_info *ctrl = &adapter->ctrl;
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work(&adapter->work);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link.speed = PHY_LINK_SPEED_ZERO;

	be_intr_set(ctrl, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(ctrl, adapter->if_handle);
	return 0;
}

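/* LRO header-parse callback: locate the MAC, IP and TCP headers in
 * the first frag, allowing an optional VLAN header; anything that is
 * not IPv4/TCP is rejected with -1.
 */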
1558 | static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, | |
1559 | void **ip_hdr, void **tcpudp_hdr, | |
1560 | u64 *hdr_flags, void *priv) | |
1561 | { | |
1562 | struct ethhdr *eh; | |
1563 | struct vlan_ethhdr *veh; | |
1564 | struct iphdr *iph; | |
1565 | u8 *va = page_address(frag->page) + frag->page_offset; | |
1566 | unsigned long ll_hlen; | |
1567 | ||
1568 | prefetch(va); | |
1569 | eh = (struct ethhdr *)va; | |
1570 | *mac_hdr = eh; | |
1571 | ll_hlen = ETH_HLEN; | |
1572 | if (eh->h_proto != htons(ETH_P_IP)) { | |
1573 | if (eh->h_proto == htons(ETH_P_8021Q)) { | |
1574 | veh = (struct vlan_ethhdr *)va; | |
1575 | if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) | |
1576 | return -1; | |
1577 | ||
1578 | ll_hlen += VLAN_HLEN; | |
1579 | } else { | |
1580 | return -1; | |
1581 | } | |
1582 | } | |
1583 | *hdr_flags = LRO_IPV4; | |
1584 | iph = (struct iphdr *)(va + ll_hlen); | |
1585 | *ip_hdr = iph; | |
1586 | if (iph->protocol != IPPROTO_TCP) | |
1587 | return -1; | |
1588 | *hdr_flags |= LRO_TCP; | |
1589 | *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2); | |
1590 | ||
1591 | return 0; | |
1592 | } | |
1593 | ||
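| /* Set up the LRO manager for NAPI-driven aggregation and plug in |
| * the frag-header parser above. |
| */ |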
1594 | static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev) | |
1595 | { | |
1596 | struct net_lro_mgr *lro_mgr; | |
1597 | ||
1598 | lro_mgr = &adapter->rx_obj.lro_mgr; | |
1599 | lro_mgr->dev = netdev; | |
1600 | lro_mgr->features = LRO_F_NAPI; | |
1601 | lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; | |
1602 | lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; | |
1603 | lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS; | |
1604 | lro_mgr->lro_arr = adapter->rx_obj.lro_desc; | |
1605 | lro_mgr->get_frag_header = be_get_frag_header; | |
1606 | lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME; | |
1607 | } | |
1608 | ||
1609 | static const struct net_device_ops be_netdev_ops = { |
1610 | .ndo_open = be_open, | |
1611 | .ndo_stop = be_close, | |
1612 | .ndo_start_xmit = be_xmit, | |
1613 | .ndo_get_stats = be_get_stats, | |
1614 | .ndo_set_rx_mode = be_set_multicast_list, | |
1615 | .ndo_set_mac_address = be_mac_addr_set, | |
1616 | .ndo_change_mtu = be_change_mtu, | |
1617 | .ndo_validate_addr = eth_validate_addr, | |
1618 | .ndo_vlan_rx_register = be_vlan_register, | |
1619 | .ndo_vlan_rx_add_vid = be_vlan_add_vid, | |
1620 | .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, | |
1621 | }; | |
1622 | ||
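| /* One-time netdev setup: offload feature flags, netdev/ethtool ops, |
| * LRO and the RX/TX NAPI contexts. The link starts down. |
| */ |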
1623 | static void be_netdev_init(struct net_device *netdev) | |
1624 | { | |
1625 | struct be_adapter *adapter = netdev_priv(netdev); | |
1626 | ||
1627 | netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | | |
1628 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM | | |
1629 | NETIF_F_IPV6_CSUM | NETIF_F_TSO6; | |
1630 | ||
1631 | netdev->flags |= IFF_MULTICAST; | |
1632 | ||
1633 | BE_SET_NETDEV_OPS(netdev, &be_netdev_ops); | |
1634 | ||
1635 | SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); | |
1636 | ||
1637 | be_lro_init(adapter, netdev); | |
1638 | ||
1639 | netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx, | |
1640 | BE_NAPI_WEIGHT); | |
1641 | netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx, | |
1642 | BE_NAPI_WEIGHT); | |
1643 | ||
1644 | netif_carrier_off(netdev); | |
1645 | netif_stop_queue(netdev); | |
1646 | } | |
1647 | ||
1648 | static void be_unmap_pci_bars(struct be_adapter *adapter) | |
1649 | { | |
1650 | struct be_ctrl_info *ctrl = &adapter->ctrl; | |
1651 | if (ctrl->csr) | |
1652 | iounmap(ctrl->csr); | |
1653 | if (ctrl->db) | |
1654 | iounmap(ctrl->db); | |
1655 | if (ctrl->pcicfg) | |
1656 | iounmap(ctrl->pcicfg); | |
1657 | } | |
1658 | ||
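| /* Map the CSR (BAR 2), doorbell (BAR 4) and PCI config (BAR 1) |
| * regions; on failure, unmap whatever was already mapped. |
| */ |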
1659 | static int be_map_pci_bars(struct be_adapter *adapter) | |
1660 | { | |
1661 | u8 __iomem *addr; | |
1662 | ||
1663 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), | |
1664 | pci_resource_len(adapter->pdev, 2)); | |
1665 | if (addr == NULL) | |
1666 | return -ENOMEM; | |
1667 | adapter->ctrl.csr = addr; | |
1668 | ||
1669 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4), |
1670 | 128 * 1024); /* only the first 128KB of the doorbell BAR */ |
1671 | if (addr == NULL) | |
1672 | goto pci_map_err; | |
1673 | adapter->ctrl.db = addr; | |
1674 | ||
1675 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1), | |
1676 | pci_resource_len(adapter->pdev, 1)); | |
1677 | if (addr == NULL) | |
1678 | goto pci_map_err; | |
1679 | adapter->ctrl.pcicfg = addr; | |
1680 | ||
1681 | return 0; | |
1682 | pci_map_err: | |
1683 | be_unmap_pci_bars(adapter); | |
1684 | return -ENOMEM; | |
1685 | } | |
1686 | ||
1687 | ||
1688 | static void be_ctrl_cleanup(struct be_adapter *adapter) | |
1689 | { | |
1690 | struct be_dma_mem *mem = &adapter->ctrl.mbox_mem_alloced; | |
1691 | ||
1692 | be_unmap_pci_bars(adapter); | |
1693 | ||
1694 | if (mem->va) | |
1695 | pci_free_consistent(adapter->pdev, mem->size, | |
1696 | mem->va, mem->dma); | |
1697 | } | |
1698 | ||
1699 | /* Initialize the mbox required to send cmds to BE */ | |
1700 | static int be_ctrl_init(struct be_adapter *adapter) | |
1701 | { | |
1702 | struct be_ctrl_info *ctrl = &adapter->ctrl; | |
1703 | struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced; | |
1704 | struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem; | |
1705 | int status; | |
1706 | u32 val; | |
1707 | ||
1708 | status = be_map_pci_bars(adapter); | |
1709 | if (status) | |
1710 | return status; | |
1711 | ||
1712 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; /* room to 16-byte align */ |
1713 | mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, | |
1714 | mbox_mem_alloc->size, &mbox_mem_alloc->dma); | |
1715 | if (!mbox_mem_alloc->va) { | |
1716 | be_unmap_pci_bars(adapter); | |
1717 | return -ENOMEM; |
1718 | } | |
1719 | mbox_mem_align->size = sizeof(struct be_mcc_mailbox); | |
1720 | mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); | |
1721 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); | |
1722 | memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); | |
1723 | spin_lock_init(&ctrl->cmd_lock); | |
1724 | ||
1725 | val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); | |
1726 | ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) & | |
1727 | MEMBAR_CTRL_INT_CTRL_PFUNC_MASK; | |
1728 | return 0; | |
1729 | } | |
1730 | ||
1731 | static void be_stats_cleanup(struct be_adapter *adapter) | |
1732 | { | |
1733 | struct be_stats_obj *stats = &adapter->stats; | |
1734 | struct be_dma_mem *cmd = &stats->cmd; | |
1735 | ||
1736 | if (cmd->va) | |
1737 | pci_free_consistent(adapter->pdev, cmd->size, | |
1738 | cmd->va, cmd->dma); | |
1739 | } | |
1740 | ||
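| /* DMA memory for the GET_STATS command/response. */ |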
1741 | static int be_stats_init(struct be_adapter *adapter) | |
1742 | { | |
1743 | struct be_stats_obj *stats = &adapter->stats; | |
1744 | struct be_dma_mem *cmd = &stats->cmd; | |
1745 | ||
1746 | cmd->size = sizeof(struct be_cmd_req_get_stats); | |
1747 | cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); | |
1748 | if (cmd->va == NULL) | |
1749 | return -ENOMEM; |
1750 | return 0; | |
1751 | } | |
1752 | ||
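| /* PCI remove: inverse of be_probe(); unregister the netdev, free |
| * the stats and ctrl DMA memory, disable MSI-X and the device. |
| */ |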
1753 | static void __devexit be_remove(struct pci_dev *pdev) | |
1754 | { | |
1755 | struct be_adapter *adapter = pci_get_drvdata(pdev); | |
1756 | if (!adapter) | |
1757 | return; | |
1758 | ||
1759 | unregister_netdev(adapter->netdev); | |
1760 | ||
1761 | be_stats_cleanup(adapter); | |
1762 | ||
1763 | be_ctrl_cleanup(adapter); | |
1764 | ||
1765 | if (adapter->msix_enabled) { | |
1766 | pci_disable_msix(adapter->pdev); | |
1767 | adapter->msix_enabled = false; | |
1768 | } | |
1769 | ||
1770 | pci_set_drvdata(pdev, NULL); | |
1771 | pci_release_regions(pdev); | |
1772 | pci_disable_device(pdev); | |
1773 | ||
1774 | free_netdev(adapter->netdev); | |
1775 | } | |
1776 | ||
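| /* Bring the hw to a ready state: wait for POST to complete, then |
| * cache the firmware version and the port number. |
| */ |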
1777 | static int be_hw_up(struct be_adapter *adapter) | |
1778 | { | |
1779 | struct be_ctrl_info *ctrl = &adapter->ctrl; | |
1780 | int status; | |
1781 | ||
1782 | status = be_cmd_POST(ctrl); | |
1783 | if (status) | |
1784 | return status; | |
1785 | ||
1786 | status = be_cmd_get_fw_ver(ctrl, adapter->fw_ver); | |
1787 | if (status) | |
1788 | return status; | |
1789 | ||
1790 | status = be_cmd_query_fw_cfg(ctrl, &adapter->port_num); | |
1791 | return status; | |
1792 | } | |
1793 | ||
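| /* PCI probe: enable the device, set the DMA mask (64-bit with a |
| * 32-bit fallback), init the mailbox and stats memory, query the |
| * permanent MAC and register the netdev. The error labels unwind |
| * everything in reverse order of setup. |
| */ |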
1794 | static int __devinit be_probe(struct pci_dev *pdev, | |
1795 | const struct pci_device_id *pdev_id) | |
1796 | { | |
1797 | int status = 0; | |
1798 | struct be_adapter *adapter; | |
1799 | struct net_device *netdev; | |
1800 | struct be_ctrl_info *ctrl; | |
1801 | u8 mac[ETH_ALEN]; | |
1802 | ||
1803 | status = pci_enable_device(pdev); | |
1804 | if (status) | |
1805 | goto do_none; | |
1806 | ||
1807 | status = pci_request_regions(pdev, DRV_NAME); | |
1808 | if (status) | |
1809 | goto disable_dev; | |
1810 | pci_set_master(pdev); | |
1811 | ||
1812 | netdev = alloc_etherdev(sizeof(struct be_adapter)); | |
1813 | if (netdev == NULL) { | |
1814 | status = -ENOMEM; | |
1815 | goto rel_reg; | |
1816 | } | |
1817 | adapter = netdev_priv(netdev); | |
1818 | adapter->pdev = pdev; | |
1819 | pci_set_drvdata(pdev, adapter); | |
1820 | adapter->netdev = netdev; | |
1821 | ||
1822 | be_msix_enable(adapter); | |
1823 | ||
e930438c | 1824 | status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
6b7c5b94 SP |
1825 | if (!status) { |
1826 | netdev->features |= NETIF_F_HIGHDMA; | |
1827 | } else { | |
e930438c | 1828 | status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
6b7c5b94 SP |
1829 | if (status) { |
1830 | dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); | |
1831 | goto free_netdev; | |
1832 | } | |
1833 | } | |
1834 | ||
1835 | ctrl = &adapter->ctrl; | |
1836 | status = be_ctrl_init(adapter); | |
1837 | if (status) | |
1838 | goto free_netdev; | |
1839 | ||
1840 | status = be_stats_init(adapter); | |
1841 | if (status) | |
1842 | goto ctrl_clean; | |
1843 | ||
1844 | status = be_hw_up(adapter); | |
1845 | if (status) | |
1846 | goto stats_clean; | |
1847 | ||
1848 | status = be_cmd_mac_addr_query(ctrl, mac, MAC_ADDRESS_TYPE_NETWORK, | |
1849 | true /* permanent */, 0); | |
1850 | if (status) | |
1851 | goto stats_clean; | |
1852 | memcpy(netdev->dev_addr, mac, ETH_ALEN); | |
1853 | ||
1854 | INIT_DELAYED_WORK(&adapter->work, be_worker); | |
1855 | be_netdev_init(netdev); | |
1856 | SET_NETDEV_DEV(netdev, &adapter->pdev->dev); | |
1857 | ||
1858 | status = register_netdev(netdev); | |
1859 | if (status != 0) | |
1860 | goto stats_clean; | |
1861 | ||
c4ca2374 | 1862 | dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); |
6b7c5b94 SP |
1863 | return 0; |
1864 | ||
1865 | stats_clean: | |
1866 | be_stats_cleanup(adapter); | |
1867 | ctrl_clean: | |
1868 | be_ctrl_cleanup(adapter); | |
1869 | free_netdev: | |
1870 | free_netdev(adapter->netdev); | |
1871 | rel_reg: | |
1872 | pci_release_regions(pdev); | |
1873 | disable_dev: | |
1874 | pci_disable_device(pdev); | |
1875 | do_none: | |
c4ca2374 | 1876 | dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev)); |
6b7c5b94 SP |
1877 | return status; |
1878 | } | |
1879 | ||
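| /* PM suspend: detach and close the interface (under rtnl), then |
| * save PCI state and power the device down. |
| */ |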
1880 | static int be_suspend(struct pci_dev *pdev, pm_message_t state) | |
1881 | { | |
1882 | struct be_adapter *adapter = pci_get_drvdata(pdev); | |
1883 | struct net_device *netdev = adapter->netdev; | |
1884 | ||
1885 | netif_device_detach(netdev); | |
1886 | if (netif_running(netdev)) { | |
1887 | rtnl_lock(); | |
1888 | be_close(netdev); | |
1889 | rtnl_unlock(); | |
1890 | } | |
1891 | ||
1892 | pci_save_state(pdev); | |
1893 | pci_disable_device(pdev); | |
1894 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
1895 | return 0; | |
1896 | } | |
1897 | ||
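| /* PM resume: re-enable the device, restore PCI state, reopen the |
| * interface if it was running, then reattach it. |
| */ |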
1898 | static int be_resume(struct pci_dev *pdev) | |
1899 | { | |
1900 | int status = 0; | |
1901 | struct be_adapter *adapter = pci_get_drvdata(pdev); | |
1902 | struct net_device *netdev = adapter->netdev; | |
1903 | ||
1904 | netif_device_detach(netdev); | |
1905 | ||
1906 | status = pci_enable_device(pdev); | |
1907 | if (status) | |
1908 | return status; | |
1909 | ||
1910 | pci_set_power_state(pdev, PCI_D0); |
1911 | pci_restore_state(pdev); | |
1912 | ||
6b7c5b94 SP |
1913 | if (netif_running(netdev)) { |
1914 | rtnl_lock(); | |
1915 | be_open(netdev); | |
1916 | rtnl_unlock(); | |
1917 | } | |
1918 | netif_device_attach(netdev); | |
1919 | return 0; | |
1920 | } | |
1921 | ||
1922 | static struct pci_driver be_driver = { | |
1923 | .name = DRV_NAME, | |
1924 | .id_table = be_dev_ids, | |
1925 | .probe = be_probe, | |
1926 | .remove = be_remove, | |
1927 | .suspend = be_suspend, | |
1928 | .resume = be_resume | |
1929 | }; | |
1930 | ||
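| /* Validate the rx_frag_size module param before registering the |
| * PCI driver; fall back to 2048 on bad values. |
| */ |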
1931 | static int __init be_init_module(void) | |
1932 | { | |
1933 | if (rx_frag_size != 8192 && rx_frag_size != 4096 | |
1934 | && rx_frag_size != 2048) { | |
1935 | printk(KERN_WARNING DRV_NAME | |
1936 | " : Module param rx_frag_size must be 2048/4096/8192." | |
1937 | " Using 2048\n"); | |
1938 | rx_frag_size = 2048; | |
1939 | } | |
1940 | /* Ensure rx_frag_size is aligned to a cache line */ |
1941 | if (SKB_DATA_ALIGN(rx_frag_size) != rx_frag_size) { | |
1942 | printk(KERN_WARNING DRV_NAME | |
1943 | " : Bad module param rx_frag_size. Using 2048\n"); | |
1944 | rx_frag_size = 2048; | |
1945 | } | |
1946 | ||
1947 | return pci_register_driver(&be_driver); | |
1948 | } | |
1949 | module_init(be_init_module); | |
1950 | ||
1951 | static void __exit be_exit_module(void) | |
1952 | { | |
1953 | pci_unregister_driver(&be_driver); | |
1954 | } | |
1955 | module_exit(be_exit_module); |