/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 * vlan_tag_strip:
 *	Strip VLAN Tag enable/disable. Instructs the device to remove
 *	the VLAN tag from all received tagged frames that are not
 *	replicated at the internal L2 switch.
 *		0 - Do not strip the VLAN tag.
 *		1 - Strip the VLAN tag.
 *
 * addr_learn_en:
 *	Enable learning the MAC address of the guest OS interface in
 *	a virtualization environment.
 *		0 - DISABLE
 *		1 - ENABLE
 *
 * max_config_port:
 *	Maximum number of ports to be supported.
 *	MIN - 1 and MAX - 2
 *
 * max_config_vpath:
 *	Maximum number of VPATHs configured for each device function.
 *	MIN - 1 and MAX - 17
 *
 * max_config_dev:
 *	Maximum number of device functions to be enabled.
 *	MIN - 1 and MAX - 17
 *
 ******************************************************************************/
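
/* Editorial example (not part of the original source): with the parameters
 * documented above, a hypothetical module load that strips VLAN tags and
 * enables MAC address learning could look like
 *
 *	modprobe vxge vlan_tag_strip=1 addr_learn_en=1 max_config_vpath=4
 *
 * The exact parameter names are whatever VXGE_MODULE_PARAM_INT() registers
 * below; defaults for unspecified parameters come from the driver headers.
 */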

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/net_tstamp.h>
#include <linux/prefetch.h>
#include <linux/module.h>
#include "vxge-main.h"
#include "vxge-reg.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");

static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);

VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);

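/* Editorial note (interpretation, not in the original source): each entry
 * of vpath_selector[] below is the smallest (2^n - 1) mask that covers its
 * index, so "hash & vpath_selector[n_paths - 1]" spreads traffic across up
 * to n_paths transmit queues without a modulo; see vxge_get_vpath_no().
 */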
static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);

static struct vxge_drv_config *driver_config;

static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}

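/* Editorial note (assumption): TX completions below are harvested in
 * batches of up to NR_SKB_COMPLETED descriptors while holding the TX queue
 * lock, and the completed skbs are freed only after the lock is released,
 * keeping the locked section as short as possible.
 */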
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		if (__netif_tx_trylock(fifo->txq)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
					NR_SKB_COMPLETED, &more);
			__netif_tx_unlock(fifo->txq);
		}

		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}

static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}

static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}

/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Up\n");
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	netif_tx_wake_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Down\n");

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	netif_tx_stop_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
	VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}

/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}

/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}

static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	u64_stats_update_begin(&ring->stats.syncp);
	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;
	u64_stats_update_end(&ring->stats.syncp);

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ext_info->vlan &&
	    ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
		__vlan_hwaccel_put_tag(skb, ext_info->vlan);
	napi_gro_receive(ring->napi_p, skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}

static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}

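/* Editorial note (assumption): vxge_post() below batches RxD doorbell
 * writes. Every VXGE_HW_RXSYNC_FREQ_CNT descriptors it starts a new batch,
 * flushing the previous batch's first descriptor with the _wmb variant
 * (which issues the write barrier); intermediate descriptors are posted
 * without the barrier. The caller flushes the final partial batch.
 */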
static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}

/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {
			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown IPv6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

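		/* Editorial note (assumption): the test below is a classic
		 * copybreak. Large frames are handed up in the original DMA
		 * buffer and a fresh buffer is allocated for the descriptor;
		 * small frames are copied into a new skb so the large DMA
		 * buffer can be recycled at once via vxge_re_pre_post().
		 */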
		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					break;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				break;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if (ring->rx_hwts) {
			struct skb_shared_hwtstamps *skb_hwts;
			u32 ns = *(u32 *)(skb->head + pkt_length);

			skb_hwts = skb_hwtstamps(skb);
			skb_hwts->hwtstamp = ns_to_ktime(ns);
			skb_hwts->syststamp.tv64 = 0;
		}

		/* rth_hash_type and rth_it_hit are non-zero regardless of
		 * whether rss is enabled. Only the rth_value is zero/non-zero
		 * if rss is disabled/enabled, so key off of that.
		 */
		if (ext_info.rth_value)
			skb->rxhash = ext_info.rth_value;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	vxge_debug_entryexit(VXGE_TRACE,
				"%s:%d Exiting...",
				__func__, __LINE__);
	return VXGE_HW_OK;
}

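/* Editorial note (assumption): ring->budget consumed above is the per-poll
 * quota handed down from the NAPI poll routine; vxge_rx_1b_compl() stops
 * early once the quota is exhausted so a busy ring cannot monopolize the
 * CPU.
 */
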
/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NIC's
 * internal memory.
 */
static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
			"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d fifo_hw = %p dtr = %p "
			"tcode = 0x%x", fifo->ndev->name, __func__,
			__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		u64_stats_update_begin(&fifo->stats.syncp);
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;
		u64_stats_update_end(&fifo->stats.syncp);

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	if (netif_tx_queue_stopped(fifo->txq))
		netif_tx_wake_queue(fifo->txq);

	vxge_debug_entryexit(VXGE_TRACE,
				"%s: %s:%d Exiting...",
				fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}

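/* Editorial worked example (not in the original source): with 4 open
 * vpaths, vpath_selector[3] == 3, so an IPv4 packet with source port 1000
 * and destination port 2001 hashes to (1000 + 2001) & 3 == 1, i.e. fifo 1.
 * Non-IP or fragmented traffic falls through with counter == 0. Note the
 * code reads the port words for any non-fragmented IPv4 packet, not only
 * TCP, since ip->protocol is never checked.
 */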
/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
{
	u16 queue_len, counter = 0;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if (!ip_is_fragment(ip)) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;
		}
	}
	return counter;
}

static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}

static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	if (is_multicast_ether_addr(mac->macaddr))
		vpath->mcast_addr_cnt++;

	return TRUE;
}

/* Add a mac address to DA table */
static enum vxge_hw_status
vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (is_multicast_ether_addr(mac->macaddr))
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}

static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

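	/* Editorial note (assumption): if no vpath has a free DA-table slot,
	 * the first vpath is turned into a "catch-basin": the
	 * rts_mgr_cbasin_cfg register steers otherwise-unmatched frames to
	 * it, and the address is tracked in the software list only.
	 */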
	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}

/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 */
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	int vpath_no = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
			dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (netif_tx_queue_stopped(fifo->txq))
		return NETDEV_TX_BUSY;

	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		goto _exit0;
	}

	/* Last TXD? Stop tx queue to avoid dropping packets. TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		netif_tx_stop_queue(fifo->txq);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors.", dev->name);
		fifo->stats.txd_out_of_desc++;
		goto _exit0;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		fifo->stats.pci_map_fail++;
		goto _exit0;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d skb = %p txdl_priv = %p "
		"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
		__func__, __LINE__, skb, txdl_priv,
		frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

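	/* Editorial note (assumption): each non-empty fragment below gets
	 * its own DMA mapping and TxD buffer pointer; j indexes
	 * dma_buffers[] so the _exit2/_exit1 unwind path further down can
	 * release every mapping made so far on failure.
	 */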
	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!skb_frag_size(frag))
			continue;

		dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
						    0, skb_frag_size(frag),
						    DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
			goto _exit2;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
				dev->name, __func__, __LINE__, i,
				(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					skb_frag_size(frag));
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
					VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit2:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			skb_frag_size(frag), PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
	netif_tx_stop_queue(fifo->txq);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}

/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       skb_frag_size(frag), PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			if (is_multicast_ether_addr(mac->macaddr))
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}

/* delete a mac address from DA table */
static enum vxge_hw_status
vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}

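/* Editorial note (assumption): vxge_set_multicast() below first tries to
 * mirror the exact multicast list into the per-vpath DA tables; if the
 * list outgrows vpaths[0].max_mac_addr_cnt it falls back to the
 * all-multicast hardware filter (_set_all_mcast) and sets IFF_ALLMULTI.
 */
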
/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
 * determine if multicast addresses must be enabled or if promiscuous mode
 * is to be disabled etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to enable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_disable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to disable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 0;
		}
	}

	if (!vdev->config.addr_learn_en) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			if (dev->flags & IFF_PROMISC)
				status = vxge_hw_vpath_promisc_enable(
					vpath->handle);
			else
				status = vxge_hw_vpath_promisc_disable(
					vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to %s promisc"
					", status %d", dev->flags&IFF_PROMISC ?
					"enable" : "disable", status);
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((netdev_mc_count(dev) +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				if (is_multicast_ether_addr(mac_info.macaddr)) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		netdev_for_each_mc_addr(ha, dev) {
			memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				if (is_multicast_ether_addr(mac_info.macaddr))
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}

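/* Editorial note (assumption, inferred from the code below): each vpath
 * owns a block of VXGE_HW_VPATH_MSIX_ACTIVE MSI-X vectors; offsets 0 and 1
 * carry the TX and RX traffic interrupts (tim_msix_id = {0, 1, 0, 0}),
 * while the shared alarm vector lives at VXGE_ALARM_MSIX_ID within the
 * first vpath's block.
 */
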
1349 | /* | |
1350 | * vxge_vpath_intr_enable | |
1351 | * @vdev: pointer to vdev | |
1352 | * @vp_id: vpath for which to enable the interrupts | |
1353 | * | |
1354 | * Enables the interrupts for the vpath | |
1355 | */ | |
42821a5b | 1356 | static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) |
703da5a1 RV |
1357 | { |
1358 | struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; | |
b59c9457 SH |
1359 | int msix_id = 0; |
1360 | int tim_msix_id[4] = {0, 1, 0, 0}; | |
1361 | int alarm_msix_id = VXGE_ALARM_MSIX_ID; | |
703da5a1 RV |
1362 | |
1363 | vxge_hw_vpath_intr_enable(vpath->handle); | |
1364 | ||
1365 | if (vdev->config.intr_type == INTA) | |
1366 | vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle); | |
1367 | else { | |
703da5a1 RV |
1368 | vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, |
1369 | alarm_msix_id); | |
1370 | ||
b59c9457 | 1371 | msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE; |
703da5a1 RV |
1372 | vxge_hw_vpath_msix_unmask(vpath->handle, msix_id); |
1373 | vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1); | |
1374 | ||
1375 | /* enable the alarm vector */ | |
b59c9457 SH |
1376 | msix_id = (vpath->handle->vpath->hldev->first_vp_id * |
1377 | VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id; | |
1378 | vxge_hw_vpath_msix_unmask(vpath->handle, msix_id); | |
703da5a1 RV |
1379 | } |
1380 | } | |
1381 | ||
1382 | /* | |
1383 | * vxge_vpath_intr_disable | |
1384 | * @vdev: pointer to vdev | |
1385 | * @vp_id: vpath for which to disable the interrupts | |
1386 | * | |
1387 | * Disables the interrupts for the vpath | |
1388 | */ | |
42821a5b | 1389 | static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) |
703da5a1 RV |
1390 | { |
1391 | struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; | |
4d2a5b40 | 1392 | struct __vxge_hw_device *hldev; |
703da5a1 RV |
1393 | int msix_id; |
1394 | ||
d8ee7071 | 1395 | hldev = pci_get_drvdata(vdev->pdev); |
4d2a5b40 JM |
1396 | |
1397 | vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id); | |
1398 | ||
703da5a1 RV |
1399 | vxge_hw_vpath_intr_disable(vpath->handle); |
1400 | ||
1401 | if (vdev->config.intr_type == INTA) | |
1402 | vxge_hw_vpath_inta_mask_tx_rx(vpath->handle); | |
1403 | else { | |
b59c9457 | 1404 | msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE; |
703da5a1 RV |
1405 | vxge_hw_vpath_msix_mask(vpath->handle, msix_id); |
1406 | vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1); | |
1407 | ||
1408 | /* disable the alarm vector */ | |
b59c9457 SH |
1409 | msix_id = (vpath->handle->vpath->hldev->first_vp_id * |
1410 | VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; | |
703da5a1 RV |
1411 | vxge_hw_vpath_msix_mask(vpath->handle, msix_id); |
1412 | } | |
1413 | } | |
1414 | ||
528f7272 JM |
1415 | /* list all mac addresses from DA table */ |
1416 | static enum vxge_hw_status | |
1417 | vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac) | |
1418 | { | |
1419 | enum vxge_hw_status status = VXGE_HW_OK; | |
1420 | unsigned char macmask[ETH_ALEN]; | |
1421 | unsigned char macaddr[ETH_ALEN]; | |
1422 | ||
1423 | status = vxge_hw_vpath_mac_addr_get(vpath->handle, | |
1424 | macaddr, macmask); | |
1425 | if (status != VXGE_HW_OK) { | |
1426 | vxge_debug_init(VXGE_ERR, | |
1427 | "DA config list entry failed for vpath:%d", | |
1428 | vpath->device_id); | |
1429 | return status; | |
1430 | } | |
1431 | ||
1432 | while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) { | |
1433 | status = vxge_hw_vpath_mac_addr_get_next(vpath->handle, | |
1434 | macaddr, macmask); | |
1435 | if (status != VXGE_HW_OK) | |
1436 | break; | |
1437 | } | |
1438 | ||
1439 | return status; | |
1440 | } | |
1441 | ||
1442 | /* Store all mac addresses from the list to the DA table */ | |
1443 | static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) | |
1444 | { | |
1445 | enum vxge_hw_status status = VXGE_HW_OK; | |
1446 | struct macInfo mac_info; | |
1447 | u8 *mac_address = NULL; | |
1448 | struct list_head *entry, *next; | |
1449 | ||
1450 | memset(&mac_info, 0, sizeof(struct macInfo)); | |
1451 | ||
1452 | if (vpath->is_open) { | |
1453 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { | |
1454 | mac_address = | |
1455 | (u8 *)& | |
1456 | ((struct vxge_mac_addrs *)entry)->macaddr; | |
1457 | memcpy(mac_info.macaddr, mac_address, ETH_ALEN); | |
1458 | ((struct vxge_mac_addrs *)entry)->state = | |
1459 | VXGE_LL_MAC_ADDR_IN_DA_TABLE; | |
1460 | /* does this mac address already exist in da table? */ | |
1461 | status = vxge_search_mac_addr_in_da_table(vpath, | |
1462 | &mac_info); | |
1463 | if (status != VXGE_HW_OK) { | |
1464 | /* Add this mac address to the DA table */ | |
1465 | status = vxge_hw_vpath_mac_addr_add( | |
1466 | vpath->handle, mac_info.macaddr, | |
1467 | mac_info.macmask, | |
1468 | VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); | |
1469 | if (status != VXGE_HW_OK) { | |
1470 | vxge_debug_init(VXGE_ERR, | |
1471 | "DA add entry failed for vpath:%d", | |
1472 | vpath->device_id); | |
1473 | ((struct vxge_mac_addrs *)entry)->state | |
1474 | = VXGE_LL_MAC_ADDR_IN_LIST; | |
1475 | } | |
1476 | } | |
1477 | } | |
1478 | } | |
1479 | ||
1480 | return status; | |
1481 | } | |
1482 | ||
1483 | /* Store all vlan ids from the list to the vid table */ | |
1484 | static enum vxge_hw_status | |
1485 | vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) | |
1486 | { | |
1487 | enum vxge_hw_status status = VXGE_HW_OK; | |
1488 | struct vxgedev *vdev = vpath->vdev; | |
1489 | u16 vid; | |
1490 | ||
53515734 JP |
1491 | if (!vpath->is_open) |
1492 | return status; | |
528f7272 | 1493 | |
53515734 JP |
1494 | for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID) |
1495 | status = vxge_hw_vpath_vid_add(vpath->handle, vid); | |
528f7272 JM |
1496 | |
1497 | return status; | |
1498 | } | |
1499 | ||
703da5a1 RV |
1500 | /* |
1501 | * vxge_reset_vpath | |
1502 | * @vdev: pointer to vdev | |
1503 | * @vp_id: vpath to reset | |
1504 | * | |
1505 | * Resets the vpath | |
1506 | */ | |
1507 | static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) | |
1508 | { | |
1509 | enum vxge_hw_status status = VXGE_HW_OK; | |
7adf7d1b | 1510 | struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; |
703da5a1 RV |
1511 | int ret = 0; |
1512 | ||
1513 | /* check if device is down already */ | |
1514 | if (unlikely(!is_vxge_card_up(vdev))) | |
1515 | return 0; | |
1516 | ||
1517 | /* is device reset already scheduled */ | |
1518 | if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) | |
1519 | return 0; | |
1520 | ||
7adf7d1b JM |
1521 | if (vpath->handle) { |
1522 | if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) { | |
703da5a1 | 1523 | if (is_vxge_card_up(vdev) && |
7adf7d1b | 1524 | vxge_hw_vpath_recover_from_reset(vpath->handle) |
703da5a1 RV |
1525 | != VXGE_HW_OK) { |
1526 | vxge_debug_init(VXGE_ERR, | |
1527 | "vxge_hw_vpath_recover_from_reset " | |
1528 | "failed for vpath:%d", vp_id); | |
1529 | return VXGE_HW_FAIL; | |
1530 | } | |
1531 | } else { | |
1532 | vxge_debug_init(VXGE_ERR, | |
1533 | "vxge_hw_vpath_reset failed for " | |
1534 | "vpath:%d", vp_id); | |
1535 | return VXGE_HW_FAIL; | |
1536 | } | |
1537 | } else | |
1538 | return VXGE_HW_FAIL; | |
1539 | ||
7adf7d1b JM |
1540 | vxge_restore_vpath_mac_addr(vpath); |
1541 | vxge_restore_vpath_vid_table(vpath); | |
703da5a1 RV |
1542 | |
1543 | /* Enable all broadcast */ | |
7adf7d1b JM |
1544 | vxge_hw_vpath_bcast_enable(vpath->handle); |
1545 | ||
1546 | /* Enable all multicast */ | |
1547 | if (vdev->all_multi_flg) { | |
1548 | status = vxge_hw_vpath_mcast_enable(vpath->handle); | |
1549 | if (status != VXGE_HW_OK) | |
1550 | vxge_debug_init(VXGE_ERR, | |
1551 | "%s:%d Enabling multicast failed", | |
1552 | __func__, __LINE__); | |
1553 | } | |
703da5a1 RV |
1554 | |
1555 | /* Enable the interrupts */ | |
1556 | vxge_vpath_intr_enable(vdev, vp_id); | |
1557 | ||
1558 | smp_wmb(); | |
1559 | ||
1560 | /* Enable the flow of traffic through the vpath */ | |
7adf7d1b | 1561 | vxge_hw_vpath_enable(vpath->handle); |
703da5a1 RV |
1562 | |
1563 | smp_wmb(); | |
7adf7d1b JM |
1564 | vxge_hw_vpath_rx_doorbell_init(vpath->handle); |
1565 | vpath->ring.last_status = VXGE_HW_OK; | |
703da5a1 RV |
1566 | |
1567 | /* Vpath reset done */ | |
1568 | clear_bit(vp_id, &vdev->vp_reset); | |
1569 | ||
1570 | /* Start the vpath queue */ | |
98f45da2 JM |
1571 | if (netif_tx_queue_stopped(vpath->fifo.txq)) |
1572 | netif_tx_wake_queue(vpath->fifo.txq); | |
703da5a1 RV |
1573 | |
1574 | return ret; | |
1575 | } | |
1576 | ||
16fded7d JM |
1577 | /* Configure CI */ |
1578 | static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev) | |
1579 | { | |
1580 | int i = 0; | |
1581 | ||
1582 | /* Enable CI for RTI */ | |
1583 | if (vdev->config.intr_type == MSI_X) { | |
1584 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
1585 | struct __vxge_hw_ring *hw_ring; | |
1586 | ||
1587 | hw_ring = vdev->vpaths[i].ring.handle; | |
1588 | vxge_hw_vpath_dynamic_rti_ci_set(hw_ring); | |
1589 | } | |
1590 | } | |
1591 | ||
1592 | /* Enable CI for TTI */ | |
1593 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
1594 | struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle; | |
1595 | vxge_hw_vpath_tti_ci_set(hw_fifo); | |
1596 | /* | |
1597 | * For INTA (with or without NAPI), set CI ON for only one | |
1598 | * vpath, since there is only one free-running timer. | |
1599 | */ | |
1600 | if ((vdev->config.intr_type == INTA) && (i == 0)) | |
1601 | break; | |
1602 | } | |
1605 | } | |
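/* CI here is the timers' "continuous interrupt" mode (naming inferred
 * from the vxge_hw_vpath_*_ci_set() helpers): with MSI-X every ring and
 * fifo gets its own free-running timer, while with INTA only vpath 0's
 * fifo is armed, as noted in the loop above.
 */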
1606 | ||
703da5a1 RV |
1607 | static int do_vxge_reset(struct vxgedev *vdev, int event) |
1608 | { | |
1609 | enum vxge_hw_status status; | |
1610 | int ret = 0, vp_id, i; | |
1611 | ||
1612 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); | |
1613 | ||
1614 | if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) { | |
1615 | /* check if device is down already */ | |
1616 | if (unlikely(!is_vxge_card_up(vdev))) | |
1617 | return 0; | |
1618 | ||
1619 | /* is reset already scheduled */ | |
1620 | if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) | |
1621 | return 0; | |
1622 | } | |
1623 | ||
1624 | if (event == VXGE_LL_FULL_RESET) { | |
2e41f644 JM |
1625 | netif_carrier_off(vdev->ndev); |
1626 | ||
703da5a1 RV |
1627 | /* wait for all the vpath reset to complete */ |
1628 | for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { | |
1629 | while (test_bit(vp_id, &vdev->vp_reset)) | |
1630 | msleep(50); | |
1631 | } | |
1632 | ||
2e41f644 JM |
1633 | netif_carrier_on(vdev->ndev); |
1634 | ||
703da5a1 RV |
1635 | /* if execution mode is set to debug, don't reset the adapter */ |
1636 | if (unlikely(vdev->exec_mode)) { | |
1637 | vxge_debug_init(VXGE_ERR, | |
1638 | "%s: execution mode is debug, returning...", | |
1639 | vdev->ndev->name); | |
7adf7d1b JM |
1640 | clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); |
1641 | netif_tx_stop_all_queues(vdev->ndev); | |
1642 | return 0; | |
703da5a1 RV |
1643 | } |
1644 | } | |
1645 | ||
1646 | if (event == VXGE_LL_FULL_RESET) { | |
4d2a5b40 | 1647 | vxge_hw_device_wait_receive_idle(vdev->devh); |
703da5a1 RV |
1648 | vxge_hw_device_intr_disable(vdev->devh); |
1649 | ||
1650 | switch (vdev->cric_err_event) { | |
1651 | case VXGE_HW_EVENT_UNKNOWN: | |
d03848e0 | 1652 | netif_tx_stop_all_queues(vdev->ndev); |
703da5a1 RV |
1653 | vxge_debug_init(VXGE_ERR, |
1654 | "fatal: %s: Disabling device due to " | |
1655 | "unknown error", | |
1656 | vdev->ndev->name); | |
1657 | ret = -EPERM; | |
1658 | goto out; | |
1659 | case VXGE_HW_EVENT_RESET_START: | |
1660 | break; | |
1661 | case VXGE_HW_EVENT_RESET_COMPLETE: | |
1662 | case VXGE_HW_EVENT_LINK_DOWN: | |
1663 | case VXGE_HW_EVENT_LINK_UP: | |
1664 | case VXGE_HW_EVENT_ALARM_CLEARED: | |
1665 | case VXGE_HW_EVENT_ECCERR: | |
1666 | case VXGE_HW_EVENT_MRPCIM_ECCERR: | |
1667 | ret = -EPERM; | |
1668 | goto out; | |
1669 | case VXGE_HW_EVENT_FIFO_ERR: | |
1670 | case VXGE_HW_EVENT_VPATH_ERR: | |
1671 | break; | |
1672 | case VXGE_HW_EVENT_CRITICAL_ERR: | |
d03848e0 | 1673 | netif_tx_stop_all_queues(vdev->ndev); |
703da5a1 RV |
1674 | vxge_debug_init(VXGE_ERR, |
1675 | "fatal: %s: Disabling device due to " | |
1676 | "serious error", | |
1677 | vdev->ndev->name); | |
1678 | /* SOP or device reset required */ | |
1679 | /* This event is not currently used */ | |
1680 | ret = -EPERM; | |
1681 | goto out; | |
1682 | case VXGE_HW_EVENT_SERR: | |
d03848e0 | 1683 | netif_tx_stop_all_queues(vdev->ndev); |
703da5a1 RV |
1684 | vxge_debug_init(VXGE_ERR, |
1685 | "fatal: %s: Disabling device due to " | |
1686 | "serious error", | |
1687 | vdev->ndev->name); | |
1688 | ret = -EPERM; | |
1689 | goto out; | |
1690 | case VXGE_HW_EVENT_SRPCIM_SERR: | |
1691 | case VXGE_HW_EVENT_MRPCIM_SERR: | |
1692 | ret = -EPERM; | |
1693 | goto out; | |
1694 | case VXGE_HW_EVENT_SLOT_FREEZE: | |
d03848e0 | 1695 | netif_tx_stop_all_queues(vdev->ndev); |
703da5a1 RV |
1696 | vxge_debug_init(VXGE_ERR, |
1697 | "fatal: %s: Disabling device due to " | |
1698 | "slot freeze", | |
1699 | vdev->ndev->name); | |
1700 | ret = -EPERM; | |
1701 | goto out; | |
1702 | default: | |
1703 | break; | |
1704 | ||
1705 | } | |
1706 | } | |
1707 | ||
1708 | if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) | |
d03848e0 | 1709 | netif_tx_stop_all_queues(vdev->ndev); |
703da5a1 RV |
1710 | |
1711 | if (event == VXGE_LL_FULL_RESET) { | |
1712 | status = vxge_reset_all_vpaths(vdev); | |
1713 | if (status != VXGE_HW_OK) { | |
1714 | vxge_debug_init(VXGE_ERR, | |
1715 | "fatal: %s: cannot reset vpaths", | |
1716 | vdev->ndev->name); | |
1717 | ret = -EPERM; | |
1718 | goto out; | |
1719 | } | |
1720 | } | |
1721 | ||
1722 | if (event == VXGE_LL_COMPL_RESET) { | |
1723 | for (i = 0; i < vdev->no_of_vpath; i++) | |
1724 | if (vdev->vpaths[i].handle) { | |
1725 | if (vxge_hw_vpath_recover_from_reset( | |
1726 | vdev->vpaths[i].handle) | |
1727 | != VXGE_HW_OK) { | |
1728 | vxge_debug_init(VXGE_ERR, | |
1729 | "vxge_hw_vpath_recover_" | |
1730 | "from_reset failed for vpath: " | |
1731 | "%d", i); | |
1732 | ret = -EPERM; | |
1733 | goto out; | |
1734 | } | |
1735 | } else { | |
1736 | vxge_debug_init(VXGE_ERR, | |
1737 | "no valid vpath handle for " | |
1738 | "vpath:%d", i); | |
1739 | ret = -EPERM; | |
1740 | goto out; | |
1741 | } | |
1742 | } | |
1743 | ||
1744 | if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) { | |
1745 | /* Reprogram the DA table with populated mac addresses */ | |
1746 | for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { | |
1747 | vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]); | |
1748 | vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]); | |
1749 | } | |
1750 | ||
1751 | /* enable vpath interrupts */ | |
1752 | for (i = 0; i < vdev->no_of_vpath; i++) | |
1753 | vxge_vpath_intr_enable(vdev, i); | |
1754 | ||
1755 | vxge_hw_device_intr_enable(vdev->devh); | |
1756 | ||
1757 | smp_wmb(); | |
1758 | ||
1759 | /* Indicate card up */ | |
1760 | set_bit(__VXGE_STATE_CARD_UP, &vdev->state); | |
1761 | ||
1762 | /* Get the traffic to flow through the vpaths */ | |
1763 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
1764 | vxge_hw_vpath_enable(vdev->vpaths[i].handle); | |
1765 | smp_wmb(); | |
1766 | vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle); | |
1767 | } | |
1768 | ||
d03848e0 | 1769 | netif_tx_wake_all_queues(vdev->ndev); |
703da5a1 RV |
1770 | } |
1771 | ||
16fded7d JM |
1772 | /* configure CI */ |
1773 | vxge_config_ci_for_tti_rti(vdev); | |
1774 | ||
703da5a1 RV |
1775 | out: |
1776 | vxge_debug_entryexit(VXGE_TRACE, | |
1777 | "%s:%d Exiting...", __func__, __LINE__); | |
1778 | ||
1779 | /* Indicate reset done */ | |
1780 | if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) | |
1781 | clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); | |
1782 | return ret; | |
1783 | } | |
1784 | ||
1785 | /* | |
1786 | * vxge_reset | |
1787 | * @vdev: pointer to ll device | |
1788 | * | |
1789 | * driver may reset the chip on events of serr, eccerr, etc | |
1790 | */ | |
2e41f644 | 1791 | static void vxge_reset(struct work_struct *work) |
703da5a1 | 1792 | { |
2e41f644 JM |
1793 | struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task); |
1794 | ||
1795 | if (!netif_running(vdev->ndev)) | |
1796 | return; | |
1797 | ||
1798 | do_vxge_reset(vdev, VXGE_LL_FULL_RESET); | |
703da5a1 RV |
1799 | } |
1800 | ||
1801 | /** | |
1802 | * vxge_poll - Receive handler when Receive Polling is used. | |
1803 | * @dev: pointer to the device structure. | |
1804 | * @budget: Number of packets budgeted to be processed in this iteration. | |
1805 | * | |
1806 | * This function is used only when the receive side is handled through | |
1807 | * polling (known as NAPI in Linux). It does essentially what the normal | |
1808 | * Rx interrupt handler does in terms of descriptor and packet processing, | |
1809 | * but outside of interrupt context, and it processes at most a specified | |
1810 | * number of packets in one iteration. That limit is passed down by the | |
1811 | * kernel as the function argument 'budget'. | |
1812 | */ | |
1813 | static int vxge_poll_msix(struct napi_struct *napi, int budget) | |
1814 | { | |
16fded7d JM |
1815 | struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi); |
1816 | int pkts_processed; | |
703da5a1 | 1817 | int budget_org = budget; |
703da5a1 | 1818 | |
16fded7d JM |
1819 | ring->budget = budget; |
1820 | ring->pkts_processed = 0; | |
703da5a1 | 1821 | vxge_hw_vpath_poll_rx(ring->handle); |
16fded7d | 1822 | pkts_processed = ring->pkts_processed; |
703da5a1 RV |
1823 | |
1824 | if (ring->pkts_processed < budget_org) { | |
1825 | napi_complete(napi); | |
16fded7d | 1826 | |
703da5a1 RV |
1827 | /* Re enable the Rx interrupts for the vpath */ |
1828 | vxge_hw_channel_msix_unmask( | |
1829 | (struct __vxge_hw_channel *)ring->handle, | |
1830 | ring->rx_vector_no); | |
16fded7d | 1831 | mmiowb(); |
703da5a1 RV |
1832 | } |
1833 | ||
16fded7d JM |
1834 | /* Return the local copy: if the interrupt fires again right after | |
1835 | * being unmasked above, it can preempt this NAPI context and modify | |
1836 | * ring->pkts_processed before we return. */ | |
1837 | return pkts_processed; | |
703da5a1 RV |
1838 | } |
1839 | ||
1840 | static int vxge_poll_inta(struct napi_struct *napi, int budget) | |
1841 | { | |
1842 | struct vxgedev *vdev = container_of(napi, struct vxgedev, napi); | |
1843 | int pkts_processed = 0; | |
1844 | int i; | |
1845 | int budget_org = budget; | |
1846 | struct vxge_ring *ring; | |
1847 | ||
d8ee7071 | 1848 | struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev); |
703da5a1 RV |
1849 | |
1850 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
1851 | ring = &vdev->vpaths[i].ring; | |
1852 | ring->budget = budget; | |
16fded7d | 1853 | ring->pkts_processed = 0; |
703da5a1 RV |
1854 | vxge_hw_vpath_poll_rx(ring->handle); |
1855 | pkts_processed += ring->pkts_processed; | |
1856 | budget -= ring->pkts_processed; | |
1857 | if (budget <= 0) | |
1858 | break; | |
1859 | } | |
1860 | ||
1861 | VXGE_COMPLETE_ALL_TX(vdev); | |
1862 | ||
1863 | if (pkts_processed < budget_org) { | |
1864 | napi_complete(napi); | |
1865 | /* Re enable the Rx interrupts for the ring */ | |
1866 | vxge_hw_device_unmask_all(hldev); | |
1867 | vxge_hw_device_flush_io(hldev); | |
1868 | } | |
1869 | ||
1870 | return pkts_processed; | |
1871 | } | |
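/* Note the single NAPI budget is shared across all rings in INTA mode:
 * each ring consumes part of it and the loop exits once it is spent, so
 * with several vpaths one poll pass may not visit every ring. Rings are
 * always scanned from index 0, which mildly favors lower-numbered vpaths.
 */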
1872 | ||
1873 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1874 | /** | |
1875 | * vxge_netpoll - netpoll event handler entry point | |
1876 | * @dev : pointer to the device structure. | |
1877 | * Description: | |
1878 | * This function will be called by upper layer to check for events on the | |
1879 | * interface in situations where interrupts are disabled. It is used for | |
1880 | * specific in-kernel networking tasks, such as remote consoles and kernel | |
1881 | * debugging over the network (for example, netdump in Red Hat). | |
1882 | */ | |
1883 | static void vxge_netpoll(struct net_device *dev) | |
1884 | { | |
32e819e4 FR |
1885 | struct vxgedev *vdev = netdev_priv(dev); |
1886 | struct pci_dev *pdev = vdev->pdev; | |
1887 | struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); | |
1888 | const int irq = pdev->irq; | |
703da5a1 RV |
1889 | |
1890 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); | |
1891 | ||
32e819e4 | 1892 | if (pci_channel_offline(pdev)) |
703da5a1 RV |
1893 | return; |
1894 | ||
32e819e4 | 1895 | disable_irq(irq); |
703da5a1 RV |
1896 | vxge_hw_device_clear_tx_rx(hldev); |
1897 | ||
1899 | VXGE_COMPLETE_ALL_RX(vdev); | |
1900 | VXGE_COMPLETE_ALL_TX(vdev); | |
1901 | ||
32e819e4 | 1902 | enable_irq(irq); |
703da5a1 RV |
1903 | |
1904 | vxge_debug_entryexit(VXGE_TRACE, | |
1905 | "%s:%d Exiting...", __func__, __LINE__); | |
703da5a1 RV |
1906 | } |
1907 | #endif | |
1908 | ||
1909 | /* RTH configuration */ | |
1910 | static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) | |
1911 | { | |
1912 | enum vxge_hw_status status = VXGE_HW_OK; | |
1913 | struct vxge_hw_rth_hash_types hash_types; | |
1914 | u8 itable[256] = {0}; /* indirection table */ | |
1915 | u8 mtable[256] = {0}; /* CPU to vpath mapping */ | |
1916 | int index; | |
1917 | ||
1918 | /* | |
1919 | * Filling | |
1920 | * - itable with bucket numbers | |
1921 | * - mtable with bucket-to-vpath mapping | |
1922 | */ | |
1923 | for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) { | |
1924 | itable[index] = index; | |
1925 | mtable[index] = index % vdev->no_of_vpath; | |
1926 | } | |
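/* Worked example (values chosen for illustration): with rth_bkt_sz = 2
 * the table has 1 << 2 = 4 buckets; with no_of_vpath = 2 the loop above
 * yields itable = {0, 1, 2, 3} and mtable = {0, 1, 0, 1}, i.e. hash
 * buckets are dealt round-robin across the vpaths.
 */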
1927 | ||
703da5a1 RV |
1928 | /* set indirection table, bucket-to-vpath mapping */ |
1929 | status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, | |
1930 | vdev->no_of_vpath, | |
1931 | mtable, itable, | |
1932 | vdev->config.rth_bkt_sz); | |
1933 | if (status != VXGE_HW_OK) { | |
1934 | vxge_debug_init(VXGE_ERR, | |
1935 | "RTH indirection table configuration failed " | |
1936 | "for vpath:%d", vdev->vpaths[0].device_id); | |
1937 | return status; | |
1938 | } | |
1939 | ||
47f01db4 JM |
1940 | /* Fill RTH hash types */ |
1941 | hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4; | |
1942 | hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4; | |
1943 | hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6; | |
1944 | hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6; | |
1945 | hash_types.hash_type_tcpipv6ex_en = | |
1946 | vdev->config.rth_hash_type_tcpipv6ex; | |
1947 | hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex; | |
1948 | ||
703da5a1 | 1949 | /* |
47f01db4 JM |
1950 | * Because the itable_set() method uses the active_table field |
1951 | * for the target virtual path, the RTH config should be updated | |
1952 | * for all VPATHs. The h/w only uses the lowest numbered VPATH | |
1953 | * when steering frames. | |
1954 | */ | |
703da5a1 RV |
1955 | for (index = 0; index < vdev->no_of_vpath; index++) { |
1956 | status = vxge_hw_vpath_rts_rth_set( | |
1957 | vdev->vpaths[index].handle, | |
1958 | vdev->config.rth_algorithm, | |
1959 | &hash_types, | |
1960 | vdev->config.rth_bkt_sz); | |
703da5a1 RV |
1961 | if (status != VXGE_HW_OK) { |
1962 | vxge_debug_init(VXGE_ERR, | |
1963 | "RTH configuration failed for vpath:%d", | |
1964 | vdev->vpaths[index].device_id); | |
1965 | return status; | |
1966 | } | |
1967 | } | |
1968 | ||
1969 | return status; | |
1970 | } | |
1971 | ||
703da5a1 | 1972 | /* reset vpaths */ |
4d2a5b40 | 1973 | enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) |
703da5a1 | 1974 | { |
703da5a1 | 1975 | enum vxge_hw_status status = VXGE_HW_OK; |
7adf7d1b JM |
1976 | struct vxge_vpath *vpath; |
1977 | int i; | |
703da5a1 | 1978 | |
7adf7d1b JM |
1979 | for (i = 0; i < vdev->no_of_vpath; i++) { |
1980 | vpath = &vdev->vpaths[i]; | |
1981 | if (vpath->handle) { | |
1982 | if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) { | |
703da5a1 RV |
1983 | if (is_vxge_card_up(vdev) && |
1984 | vxge_hw_vpath_recover_from_reset( | |
7adf7d1b | 1985 | vpath->handle) != VXGE_HW_OK) { |
703da5a1 RV |
1986 | vxge_debug_init(VXGE_ERR, |
1987 | "vxge_hw_vpath_recover_" | |
1988 | "from_reset failed for vpath: " | |
1989 | "%d", i); | |
1990 | return VXGE_HW_FAIL; | |
1991 | } | |
1992 | } else { | |
1993 | vxge_debug_init(VXGE_ERR, | |
1994 | "vxge_hw_vpath_reset failed for " | |
1995 | "vpath:%d", i); | |
1996 | return VXGE_HW_FAIL; | |
1997 | } | |
1998 | } | |
7adf7d1b JM |
1999 | } |
2000 | ||
703da5a1 RV |
2001 | return status; |
2002 | } | |
2003 | ||
2004 | /* close vpaths */ | |
42821a5b | 2005 | static void vxge_close_vpaths(struct vxgedev *vdev, int index) |
703da5a1 | 2006 | { |
7adf7d1b | 2007 | struct vxge_vpath *vpath; |
703da5a1 | 2008 | int i; |
7adf7d1b | 2009 | |
703da5a1 | 2010 | for (i = index; i < vdev->no_of_vpath; i++) { |
7adf7d1b JM |
2011 | vpath = &vdev->vpaths[i]; |
2012 | ||
2013 | if (vpath->handle && vpath->is_open) { | |
2014 | vxge_hw_vpath_close(vpath->handle); | |
703da5a1 RV |
2015 | vdev->stats.vpaths_open--; |
2016 | } | |
7adf7d1b JM |
2017 | vpath->is_open = 0; |
2018 | vpath->handle = NULL; | |
703da5a1 RV |
2019 | } |
2020 | } | |
2021 | ||
2022 | /* open vpaths */ | |
42821a5b | 2023 | static int vxge_open_vpaths(struct vxgedev *vdev) |
703da5a1 | 2024 | { |
7adf7d1b | 2025 | struct vxge_hw_vpath_attr attr; |
703da5a1 | 2026 | enum vxge_hw_status status; |
7adf7d1b | 2027 | struct vxge_vpath *vpath; |
703da5a1 | 2028 | u32 vp_id = 0; |
7adf7d1b | 2029 | int i; |
703da5a1 RV |
2030 | |
2031 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
7adf7d1b | 2032 | vpath = &vdev->vpaths[i]; |
7adf7d1b | 2033 | vxge_assert(vpath->is_configured); |
e7935c96 JM |
2034 | |
2035 | if (!vdev->titan1) { | |
2036 | struct vxge_hw_vp_config *vcfg; | |
2037 | vcfg = &vdev->devh->config.vp_config[vpath->device_id]; | |
2038 | ||
2039 | vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A; | |
2040 | vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B; | |
2041 | vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C; | |
2042 | vcfg->tti.uec_a = TTI_T1A_TX_UFC_A; | |
2043 | vcfg->tti.uec_b = TTI_T1A_TX_UFC_B; | |
2044 | vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu); | |
2045 | vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu); | |
2046 | vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL; | |
2047 | vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL; | |
2048 | } | |
2049 | ||
7adf7d1b | 2050 | attr.vp_id = vpath->device_id; |
703da5a1 RV |
2051 | attr.fifo_attr.callback = vxge_xmit_compl; |
2052 | attr.fifo_attr.txdl_term = vxge_tx_term; | |
2053 | attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv); | |
7adf7d1b | 2054 | attr.fifo_attr.userdata = &vpath->fifo; |
703da5a1 RV |
2055 | |
2056 | attr.ring_attr.callback = vxge_rx_1b_compl; | |
2057 | attr.ring_attr.rxd_init = vxge_rx_initial_replenish; | |
2058 | attr.ring_attr.rxd_term = vxge_rx_term; | |
2059 | attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv); | |
7adf7d1b | 2060 | attr.ring_attr.userdata = &vpath->ring; |
703da5a1 | 2061 | |
7adf7d1b JM |
2062 | vpath->ring.ndev = vdev->ndev; |
2063 | vpath->ring.pdev = vdev->pdev; | |
528f7272 | 2064 | |
7adf7d1b | 2065 | status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); |
703da5a1 | 2066 | if (status == VXGE_HW_OK) { |
7adf7d1b | 2067 | vpath->fifo.handle = |
703da5a1 | 2068 | (struct __vxge_hw_fifo *)attr.fifo_attr.userdata; |
7adf7d1b | 2069 | vpath->ring.handle = |
703da5a1 | 2070 | (struct __vxge_hw_ring *)attr.ring_attr.userdata; |
7adf7d1b | 2071 | vpath->fifo.tx_steering_type = |
703da5a1 | 2072 | vdev->config.tx_steering_type; |
7adf7d1b JM |
2073 | vpath->fifo.ndev = vdev->ndev; |
2074 | vpath->fifo.pdev = vdev->pdev; | |
98f45da2 JM |
2075 | if (vdev->config.tx_steering_type) |
2076 | vpath->fifo.txq = | |
2077 | netdev_get_tx_queue(vdev->ndev, i); | |
2078 | else | |
2079 | vpath->fifo.txq = | |
2080 | netdev_get_tx_queue(vdev->ndev, 0); | |
7adf7d1b | 2081 | vpath->fifo.indicate_max_pkts = |
703da5a1 | 2082 | vdev->config.fifo_indicate_max_pkts; |
16fded7d | 2083 | vpath->fifo.tx_vector_no = 0; |
7adf7d1b | 2084 | vpath->ring.rx_vector_no = 0; |
b81b3733 | 2085 | vpath->ring.rx_hwts = vdev->rx_hwts; |
7adf7d1b JM |
2086 | vpath->is_open = 1; |
2087 | vdev->vp_handles[i] = vpath->handle; | |
7adf7d1b | 2088 | vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip; |
703da5a1 RV |
2089 | vdev->stats.vpaths_open++; |
2090 | } else { | |
2091 | vdev->stats.vpath_open_fail++; | |
528f7272 JM |
2092 | vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to " |
2093 | "open with status: %d", | |
2094 | vdev->ndev->name, vpath->device_id, | |
2095 | status); | |
703da5a1 RV |
2096 | vxge_close_vpaths(vdev, 0); |
2097 | return -EPERM; | |
2098 | } | |
2099 | ||
7adf7d1b | 2100 | vp_id = vpath->handle->vpath->vp_id; |
703da5a1 RV |
2101 | vdev->vpaths_deployed |= vxge_mBIT(vp_id); |
2102 | } | |
528f7272 | 2103 | |
703da5a1 RV |
2104 | return VXGE_HW_OK; |
2105 | } | |
2106 | ||
16fded7d JM |
2107 | /** |
2108 | * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing | |
2109 | * if the interrupts are not within a range | |
2110 | * @fifo: pointer to transmit fifo structure | |
2111 | * Description: The function changes the boundary timer and restriction | |
2112 | * timer values depending on the traffic | |
2113 | * Return Value: None | |
2114 | */ | |
2115 | static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo) | |
2116 | { | |
2117 | fifo->interrupt_count++; | |
2118 | if (jiffies > fifo->jiffies + HZ / 100) { | |
2119 | struct __vxge_hw_fifo *hw_fifo = fifo->handle; | |
2120 | ||
2121 | fifo->jiffies = jiffies; | |
2122 | if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT && | |
2123 | hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) { | |
2124 | hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL; | |
2125 | vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); | |
2126 | } else if (hw_fifo->rtimer != 0) { | |
2127 | hw_fifo->rtimer = 0; | |
2128 | vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); | |
2129 | } | |
2130 | fifo->interrupt_count = 0; | |
2131 | } | |
2132 | } | |
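/* In effect the Tx interrupt rate is sampled in 10 ms windows (HZ / 100):
 * when a window sees more than VXGE_T1A_MAX_TX_INTERRUPT_COUNT interrupts
 * the restriction timer is raised to VXGE_TTI_RTIMER_ADAPT_VAL so that
 * completions are batched; once the rate falls, rtimer is dropped back
 * to 0 for minimum latency.
 */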
2133 | ||
2134 | /** | |
2135 | * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing | |
2136 | * if the interrupts are not within a range | |
2137 | * @ring: pointer to receive ring structure | |
2138 | * Description: The function increases or decreases the packet counts within | |
2139 | * the ranges of traffic utilization, if the interrupts due to this ring are | |
2140 | * not within a fixed range. | |
2141 | * Return Value: Nothing | |
2142 | */ | |
2143 | static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) | |
2144 | { | |
2145 | ring->interrupt_count++; | |
2146 | if (jiffies > ring->jiffies + HZ / 100) { | |
2147 | struct __vxge_hw_ring *hw_ring = ring->handle; | |
2148 | ||
2149 | ring->jiffies = jiffies; | |
2150 | if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT && | |
2151 | hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) { | |
2152 | hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL; | |
2153 | vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); | |
2154 | } else if (hw_ring->rtimer != 0) { | |
2155 | hw_ring->rtimer = 0; | |
2156 | vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); | |
2157 | } | |
2158 | ring->interrupt_count = 0; | |
2159 | } | |
2160 | } | |
2161 | ||
703da5a1 RV |
2162 | /* |
2163 | * vxge_isr_napi | |
2164 | * @irq: the irq of the device. | |
2165 | * @dev_id: a void pointer to the hldev structure of the Titan device | |
2166 | * @ptregs: pointer to the registers pushed on the stack. | |
2167 | * | |
2168 | * This function is the ISR handler of the device when napi is enabled. It | |
2169 | * identifies the reason for the interrupt and calls the relevant service | |
2170 | * routines. | |
2171 | */ | |
2172 | static irqreturn_t vxge_isr_napi(int irq, void *dev_id) | |
2173 | { | |
703da5a1 | 2174 | struct net_device *dev; |
a5d165b5 | 2175 | struct __vxge_hw_device *hldev; |
703da5a1 RV |
2176 | u64 reason; |
2177 | enum vxge_hw_status status; | |
2c91308f | 2178 | struct vxgedev *vdev = (struct vxgedev *)dev_id; |
703da5a1 RV |
2179 | |
2180 | vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); | |
2181 | ||
a5d165b5 | 2182 | dev = vdev->ndev; |
d8ee7071 | 2183 | hldev = pci_get_drvdata(vdev->pdev); |
703da5a1 RV |
2184 | |
2185 | if (pci_channel_offline(vdev->pdev)) | |
2186 | return IRQ_NONE; | |
2187 | ||
2188 | if (unlikely(!is_vxge_card_up(vdev))) | |
4d2a5b40 | 2189 | return IRQ_HANDLED; |
703da5a1 | 2190 | |
528f7272 | 2191 | status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason); |
703da5a1 RV |
2192 | if (status == VXGE_HW_OK) { |
2193 | vxge_hw_device_mask_all(hldev); | |
2194 | ||
2195 | if (reason & | |
2196 | VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT( | |
2197 | vdev->vpaths_deployed >> | |
2198 | (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) { | |
2199 | ||
2200 | vxge_hw_device_clear_tx_rx(hldev); | |
2201 | napi_schedule(&vdev->napi); | |
2202 | vxge_debug_intr(VXGE_TRACE, | |
2203 | "%s:%d Exiting...", __func__, __LINE__); | |
2204 | return IRQ_HANDLED; | |
2205 | } else | |
2206 | vxge_hw_device_unmask_all(hldev); | |
2207 | } else if (unlikely((status == VXGE_HW_ERR_VPATH) || | |
2208 | (status == VXGE_HW_ERR_CRITICAL) || | |
2209 | (status == VXGE_HW_ERR_FIFO))) { | |
2210 | vxge_hw_device_mask_all(hldev); | |
2211 | vxge_hw_device_flush_io(hldev); | |
2212 | return IRQ_HANDLED; | |
2213 | } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE)) | |
2214 | return IRQ_HANDLED; | |
2215 | ||
2216 | vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); | |
2217 | return IRQ_NONE; | |
2218 | } | |
2219 | ||
2220 | #ifdef CONFIG_PCI_MSI | |
2221 | ||
16fded7d | 2222 | static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) |
703da5a1 RV |
2223 | { |
2224 | struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; | |
2225 | ||
16fded7d JM |
2226 | adaptive_coalesce_tx_interrupts(fifo); |
2227 | ||
2228 | vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle, | |
2229 | fifo->tx_vector_no); | |
2230 | ||
2231 | vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle, | |
2232 | fifo->tx_vector_no); | |
2233 | ||
703da5a1 RV |
2234 | VXGE_COMPLETE_VPATH_TX(fifo); |
2235 | ||
16fded7d JM |
2236 | vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle, |
2237 | fifo->tx_vector_no); | |
2238 | ||
2239 | mmiowb(); | |
2240 | ||
703da5a1 RV |
2241 | return IRQ_HANDLED; |
2242 | } | |
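/* The mask -> clear -> service -> unmask sequence above keeps MSI-X
 * events lossless: a completion arriving while the vector is masked sets
 * the pending bit, so the unmask immediately re-fires the vector instead
 * of dropping the event. The mmiowb() orders these posted MMIO writes on
 * architectures that need it.
 */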
2243 | ||
16fded7d | 2244 | static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id) |
703da5a1 RV |
2245 | { |
2246 | struct vxge_ring *ring = (struct vxge_ring *)dev_id; | |
2247 | ||
16fded7d JM |
2248 | adaptive_coalesce_rx_interrupts(ring); |
2249 | ||
703da5a1 | 2250 | vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, |
16fded7d JM |
2251 | ring->rx_vector_no); |
2252 | ||
2253 | vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle, | |
2254 | ring->rx_vector_no); | |
703da5a1 RV |
2255 | |
2256 | napi_schedule(&ring->napi); | |
2257 | return IRQ_HANDLED; | |
2258 | } | |
2259 | ||
2260 | static irqreturn_t | |
2261 | vxge_alarm_msix_handle(int irq, void *dev_id) | |
2262 | { | |
2263 | int i; | |
2264 | enum vxge_hw_status status; | |
2265 | struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id; | |
2266 | struct vxgedev *vdev = vpath->vdev; | |
b59c9457 SH |
2267 | int msix_id = (vpath->handle->vpath->vp_id * |
2268 | VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; | |
703da5a1 RV |
2269 | |
2270 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
25985edc | 2271 | /* Reduce the chance of losing alarm interrupts by masking |
16fded7d JM |
2272 | * the vector. A pending bit will be set if an alarm is |
2273 | * generated, and on unmask the interrupt will fire. | |
2274 | */ | |
b59c9457 | 2275 | vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); |
16fded7d JM |
2276 | vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id); |
2277 | mmiowb(); | |
703da5a1 RV |
2278 | |
2279 | status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, | |
2280 | vdev->exec_mode); | |
2281 | if (status == VXGE_HW_OK) { | |
703da5a1 | 2282 | vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, |
16fded7d JM |
2283 | msix_id); |
2284 | mmiowb(); | |
703da5a1 RV |
2285 | continue; |
2286 | } | |
2287 | vxge_debug_intr(VXGE_ERR, | |
2288 | "%s: vxge_hw_vpath_alarm_process failed %x ", | |
2289 | VXGE_DRIVER_NAME, status); | |
2290 | } | |
2291 | return IRQ_HANDLED; | |
2292 | } | |
2293 | ||
2294 | static int vxge_alloc_msix(struct vxgedev *vdev) | |
2295 | { | |
2296 | int j, i, ret = 0; | |
b59c9457 | 2297 | int msix_intr_vect = 0, temp; |
703da5a1 RV |
2298 | vdev->intr_cnt = 0; |
2299 | ||
b59c9457 | 2300 | start: |
703da5a1 RV |
2301 | /* Tx/Rx MSIX Vectors count */ |
2302 | vdev->intr_cnt = vdev->no_of_vpath * 2; | |
2303 | ||
2304 | /* Alarm MSIX Vectors count */ | |
2305 | vdev->intr_cnt++; | |
2306 | ||
baeb2ffa JP |
2307 | vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry), |
2308 | GFP_KERNEL); | |
703da5a1 RV |
2309 | if (!vdev->entries) { |
2310 | vxge_debug_init(VXGE_ERR, | |
2311 | "%s: memory allocation failed", | |
2312 | VXGE_DRIVER_NAME); | |
cc413d90 MS |
2313 | ret = -ENOMEM; |
2314 | goto alloc_entries_failed; | |
703da5a1 RV |
2315 | } |
2316 | ||
baeb2ffa JP |
2317 | vdev->vxge_entries = kcalloc(vdev->intr_cnt, |
2318 | sizeof(struct vxge_msix_entry), | |
2319 | GFP_KERNEL); | |
703da5a1 RV |
2320 | if (!vdev->vxge_entries) { |
2321 | vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", | |
2322 | VXGE_DRIVER_NAME); | |
cc413d90 MS |
2323 | ret = -ENOMEM; |
2324 | goto alloc_vxge_entries_failed; | |
703da5a1 RV |
2325 | } |
2326 | ||
b59c9457 | 2327 | for (i = 0, j = 0; i < vdev->no_of_vpath; i++) { |
703da5a1 RV |
2328 | |
2329 | msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; | |
2330 | ||
2331 | /* Initialize the fifo vector */ | |
2332 | vdev->entries[j].entry = msix_intr_vect; | |
2333 | vdev->vxge_entries[j].entry = msix_intr_vect; | |
2334 | vdev->vxge_entries[j].in_use = 0; | |
2335 | j++; | |
2336 | ||
2337 | /* Initialize the ring vector */ | |
2338 | vdev->entries[j].entry = msix_intr_vect + 1; | |
2339 | vdev->vxge_entries[j].entry = msix_intr_vect + 1; | |
2340 | vdev->vxge_entries[j].in_use = 0; | |
2341 | j++; | |
2342 | } | |
2343 | ||
2344 | /* Initialize the alarm vector */ | |
b59c9457 SH |
2345 | vdev->entries[j].entry = VXGE_ALARM_MSIX_ID; |
2346 | vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID; | |
703da5a1 RV |
2347 | vdev->vxge_entries[j].in_use = 0; |
2348 | ||
b59c9457 | 2349 | ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt); |
b59c9457 | 2350 | if (ret > 0) { |
703da5a1 RV |
2351 | vxge_debug_init(VXGE_ERR, |
2352 | "%s: MSI-X enable failed for %d vectors, ret: %d", | |
b59c9457 | 2353 | VXGE_DRIVER_NAME, vdev->intr_cnt, ret); |
cc413d90 MS |
2354 | if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) { |
2355 | ret = -ENODEV; | |
2356 | goto enable_msix_failed; | |
2357 | } | |
2358 | ||
703da5a1 RV |
2359 | kfree(vdev->entries); |
2360 | kfree(vdev->vxge_entries); | |
2361 | vdev->entries = NULL; | |
2362 | vdev->vxge_entries = NULL; | |
b59c9457 SH |
2363 | /* Retry with fewer vectors by reducing the vpath count */ | |
2364 | temp = (ret - 1)/2; | |
2365 | vxge_close_vpaths(vdev, temp); | |
2366 | vdev->no_of_vpath = temp; | |
2367 | goto start; | |
cc413d90 MS |
2368 | } else if (ret < 0) { |
2369 | ret = -ENODEV; | |
2370 | goto enable_msix_failed; | |
2371 | } | |
703da5a1 | 2372 | return 0; |
cc413d90 MS |
2373 | |
2374 | enable_msix_failed: | |
2375 | kfree(vdev->vxge_entries); | |
2376 | alloc_vxge_entries_failed: | |
2377 | kfree(vdev->entries); | |
2378 | alloc_entries_failed: | |
2379 | return ret; | |
703da5a1 RV |
2380 | } |
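/* Vector layout example, assuming VXGE_HW_VPATH_MSIX_ACTIVE == 4 (an
 * assumption for illustration): with no_of_vpath == 2 the requested
 * entries are {0 (Tx vp0), 1 (Rx vp0), 4 (Tx vp1), 5 (Rx vp1),
 * VXGE_ALARM_MSIX_ID}, i.e. intr_cnt == 2 * 2 + 1 == 5 vectors. On a
 * partial pci_enable_msix() failure the vpath count is cut down to what
 * the available vectors can serve ((ret - 1) / 2) and the whole layout
 * is rebuilt from "start".
 */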
2381 | ||
2382 | static int vxge_enable_msix(struct vxgedev *vdev) | |
2383 | { | |
2384 | ||
2385 | int i, ret = 0; | |
703da5a1 | 2386 | /* 0 - Tx, 1 - Rx */ |
b59c9457 SH |
2387 | int tim_msix_id[4] = {0, 1, 0, 0}; |
2388 | ||
703da5a1 RV |
2389 | vdev->intr_cnt = 0; |
2390 | ||
2391 | /* allocate msix vectors */ | |
2392 | ret = vxge_alloc_msix(vdev); | |
2393 | if (!ret) { | |
703da5a1 | 2394 | for (i = 0; i < vdev->no_of_vpath; i++) { |
7adf7d1b | 2395 | struct vxge_vpath *vpath = &vdev->vpaths[i]; |
703da5a1 | 2396 | |
7adf7d1b JM |
2397 | /* If the fifo or ring is not enabled, the MSIX vector for | |
2398 | * it should be set to 0. | |
2399 | */ | |
2400 | vpath->ring.rx_vector_no = (vpath->device_id * | |
2401 | VXGE_HW_VPATH_MSIX_ACTIVE) + 1; | |
703da5a1 | 2402 | |
16fded7d JM |
2403 | vpath->fifo.tx_vector_no = (vpath->device_id * |
2404 | VXGE_HW_VPATH_MSIX_ACTIVE); | |
2405 | ||
7adf7d1b JM |
2406 | vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, |
2407 | VXGE_ALARM_MSIX_ID); | |
703da5a1 RV |
2408 | } |
2409 | } | |
2410 | ||
2411 | return ret; | |
2412 | } | |
2413 | ||
2414 | static void vxge_rem_msix_isr(struct vxgedev *vdev) | |
2415 | { | |
2416 | int intr_cnt; | |
2417 | ||
b59c9457 | 2418 | for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1); |
703da5a1 RV |
2419 | intr_cnt++) { |
2420 | if (vdev->vxge_entries[intr_cnt].in_use) { | |
2421 | synchronize_irq(vdev->entries[intr_cnt].vector); | |
2422 | free_irq(vdev->entries[intr_cnt].vector, | |
2423 | vdev->vxge_entries[intr_cnt].arg); | |
2424 | vdev->vxge_entries[intr_cnt].in_use = 0; | |
2425 | } | |
2426 | } | |
2427 | ||
2428 | kfree(vdev->entries); | |
2429 | kfree(vdev->vxge_entries); | |
2430 | vdev->entries = NULL; | |
2431 | vdev->vxge_entries = NULL; | |
2432 | ||
2433 | if (vdev->config.intr_type == MSI_X) | |
2434 | pci_disable_msix(vdev->pdev); | |
2435 | } | |
2436 | #endif | |
2437 | ||
2438 | static void vxge_rem_isr(struct vxgedev *vdev) | |
2439 | { | |
2c91308f | 2440 | struct __vxge_hw_device *hldev; |
d8ee7071 | 2441 | hldev = pci_get_drvdata(vdev->pdev); |
703da5a1 RV |
2442 | |
2443 | #ifdef CONFIG_PCI_MSI | |
2444 | if (vdev->config.intr_type == MSI_X) { | |
2445 | vxge_rem_msix_isr(vdev); | |
2446 | } else | |
2447 | #endif | |
2448 | if (vdev->config.intr_type == INTA) { | |
2449 | synchronize_irq(vdev->pdev->irq); | |
a5d165b5 | 2450 | free_irq(vdev->pdev->irq, vdev); |
703da5a1 RV |
2451 | } |
2452 | } | |
2453 | ||
2454 | static int vxge_add_isr(struct vxgedev *vdev) | |
2455 | { | |
2456 | int ret = 0; | |
703da5a1 RV |
2457 | #ifdef CONFIG_PCI_MSI |
2458 | int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; | |
703da5a1 RV |
2459 | int pci_fun = PCI_FUNC(vdev->pdev->devfn); |
2460 | ||
2461 | if (vdev->config.intr_type == MSI_X) | |
2462 | ret = vxge_enable_msix(vdev); | |
2463 | ||
2464 | if (ret) { | |
2465 | vxge_debug_init(VXGE_ERR, | |
2466 | "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME); | |
eb5f10c2 SH |
2467 | vxge_debug_init(VXGE_ERR, |
2468 | "%s: Defaulting to INTA", VXGE_DRIVER_NAME); | |
2469 | vdev->config.intr_type = INTA; | |
703da5a1 RV |
2470 | } |
2471 | ||
2472 | if (vdev->config.intr_type == MSI_X) { | |
2473 | for (intr_idx = 0; | |
2474 | intr_idx < (vdev->no_of_vpath * | |
2475 | VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) { | |
2476 | ||
2477 | msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE; | |
2478 | irq_req = 0; | |
2479 | ||
2480 | switch (msix_idx) { | |
2481 | case 0: | |
2482 | snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, | |
b59c9457 SH |
2483 | "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d", |
2484 | vdev->ndev->name, | |
2485 | vdev->entries[intr_cnt].entry, | |
2486 | pci_fun, vp_idx); | |
703da5a1 RV |
2487 | ret = request_irq( |
2488 | vdev->entries[intr_cnt].vector, | |
2489 | vxge_tx_msix_handle, 0, | |
2490 | vdev->desc[intr_cnt], | |
2491 | &vdev->vpaths[vp_idx].fifo); | |
2492 | vdev->vxge_entries[intr_cnt].arg = | |
2493 | &vdev->vpaths[vp_idx].fifo; | |
2494 | irq_req = 1; | |
2495 | break; | |
2496 | case 1: | |
2497 | snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, | |
b59c9457 SH |
2498 | "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d", |
2499 | vdev->ndev->name, | |
2500 | vdev->entries[intr_cnt].entry, | |
2501 | pci_fun, vp_idx); | |
703da5a1 RV |
2502 | ret = request_irq( |
2503 | vdev->entries[intr_cnt].vector, | |
2504 | vxge_rx_msix_napi_handle, | |
2505 | 0, | |
2506 | vdev->desc[intr_cnt], | |
2507 | &vdev->vpaths[vp_idx].ring); | |
2508 | vdev->vxge_entries[intr_cnt].arg = | |
2509 | &vdev->vpaths[vp_idx].ring; | |
2510 | irq_req = 1; | |
2511 | break; | |
2512 | } | |
2513 | ||
2514 | if (ret) { | |
2515 | vxge_debug_init(VXGE_ERR, | |
2516 | "%s: MSIX - %d Registration failed", | |
2517 | vdev->ndev->name, intr_cnt); | |
2518 | vxge_rem_msix_isr(vdev); | |
eb5f10c2 SH |
2519 | vdev->config.intr_type = INTA; |
2520 | vxge_debug_init(VXGE_ERR, | |
2521 | "%s: Defaulting to INTA" | |
2522 | , vdev->ndev->name); | |
703da5a1 | 2523 | goto INTA_MODE; |
703da5a1 RV |
2524 | } |
2525 | ||
2526 | if (irq_req) { | |
2527 | /* We requested for this msix interrupt */ | |
2528 | vdev->vxge_entries[intr_cnt].in_use = 1; | |
b59c9457 SH |
2529 | msix_idx += vdev->vpaths[vp_idx].device_id * |
2530 | VXGE_HW_VPATH_MSIX_ACTIVE; | |
703da5a1 RV |
2531 | vxge_hw_vpath_msix_unmask( |
2532 | vdev->vpaths[vp_idx].handle, | |
b59c9457 | 2533 | msix_idx); |
703da5a1 RV |
2534 | intr_cnt++; |
2535 | } | |
2536 | ||
2537 | /* Point to next vpath handler */ | |
8e95a202 JP |
2538 | if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) && |
2539 | (vp_idx < (vdev->no_of_vpath - 1))) | |
2540 | vp_idx++; | |
703da5a1 RV |
2541 | } |
2542 | ||
b59c9457 | 2543 | intr_cnt = vdev->no_of_vpath * 2; |
703da5a1 | 2544 | snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, |
b59c9457 SH |
2545 | "%s:vxge:MSI-X %d - Alarm - fn:%d", |
2546 | vdev->ndev->name, | |
2547 | vdev->entries[intr_cnt].entry, | |
2548 | pci_fun); | |
703da5a1 RV |
2549 | /* For Alarm interrupts */ |
2550 | ret = request_irq(vdev->entries[intr_cnt].vector, | |
2551 | vxge_alarm_msix_handle, 0, | |
2552 | vdev->desc[intr_cnt], | |
b59c9457 | 2553 | &vdev->vpaths[0]); |
703da5a1 RV |
2554 | if (ret) { |
2555 | vxge_debug_init(VXGE_ERR, | |
2556 | "%s: MSIX - %d Registration failed", | |
2557 | vdev->ndev->name, intr_cnt); | |
2558 | vxge_rem_msix_isr(vdev); | |
eb5f10c2 SH |
2559 | vdev->config.intr_type = INTA; |
2560 | vxge_debug_init(VXGE_ERR, | |
2561 | "%s: Defaulting to INTA", | |
2562 | vdev->ndev->name); | |
703da5a1 | 2563 | goto INTA_MODE; |
703da5a1 RV |
2564 | } |
2565 | ||
b59c9457 SH |
2566 | msix_idx = (vdev->vpaths[0].handle->vpath->vp_id * |
2567 | VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; | |
703da5a1 | 2568 | vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle, |
b59c9457 | 2569 | msix_idx); |
703da5a1 | 2570 | vdev->vxge_entries[intr_cnt].in_use = 1; |
b59c9457 | 2571 | vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0]; |
703da5a1 RV |
2572 | } |
2573 | INTA_MODE: | |
2574 | #endif | |
703da5a1 RV |
2575 | |
2576 | if (vdev->config.intr_type == INTA) { | |
b59c9457 SH |
2577 | snprintf(vdev->desc[0], VXGE_INTR_STRLEN, |
2578 | "%s:vxge:INTA", vdev->ndev->name); | |
eb5f10c2 SH |
2579 | vxge_hw_device_set_intr_type(vdev->devh, |
2580 | VXGE_HW_INTR_MODE_IRQLINE); | |
16fded7d JM |
2581 | |
2582 | vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle); | |
2583 | ||
703da5a1 RV |
2584 | ret = request_irq((int) vdev->pdev->irq, |
2585 | vxge_isr_napi, | |
a5d165b5 | 2586 | IRQF_SHARED, vdev->desc[0], vdev); |
703da5a1 RV |
2587 | if (ret) { |
2588 | vxge_debug_init(VXGE_ERR, | |
2589 | "%s %s-%d: ISR registration failed", | |
2590 | VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq); | |
2591 | return -ENODEV; | |
2592 | } | |
2593 | vxge_debug_init(VXGE_TRACE, | |
2594 | "new %s-%d line allocated", | |
2595 | "IRQ", vdev->pdev->irq); | |
2596 | } | |
2597 | ||
2598 | return VXGE_HW_OK; | |
2599 | } | |
2600 | ||
2601 | static void vxge_poll_vp_reset(unsigned long data) | |
2602 | { | |
2603 | struct vxgedev *vdev = (struct vxgedev *)data; | |
2604 | int i, j = 0; | |
2605 | ||
2606 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
2607 | if (test_bit(i, &vdev->vp_reset)) { | |
2608 | vxge_reset_vpath(vdev, i); | |
2609 | j++; | |
2610 | } | |
2611 | } | |
2612 | if (j && (vdev->config.intr_type != MSI_X)) { | |
2613 | vxge_hw_device_unmask_all(vdev->devh); | |
2614 | vxge_hw_device_flush_io(vdev->devh); | |
2615 | } | |
2616 | ||
2617 | mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2); | |
2618 | } | |
2619 | ||
2620 | static void vxge_poll_vp_lockup(unsigned long data) | |
2621 | { | |
2622 | struct vxgedev *vdev = (struct vxgedev *)data; | |
703da5a1 | 2623 | enum vxge_hw_status status = VXGE_HW_OK; |
7adf7d1b JM |
2624 | struct vxge_vpath *vpath; |
2625 | struct vxge_ring *ring; | |
2626 | int i; | |
62ea0557 | 2627 | unsigned long rx_frms; |
703da5a1 RV |
2628 | |
2629 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
2630 | ring = &vdev->vpaths[i].ring; | |
62ea0557 | 2631 | |
2632 | /* Frame count snapshot, truncated to machine word size */ | |
2633 | rx_frms = ACCESS_ONCE(ring->stats.rx_frms); | |
2634 | ||
703da5a1 | 2635 | /* Did this vpath received any packets */ |
62ea0557 | 2636 | if (ring->stats.prev_rx_frms == rx_frms) { |
703da5a1 RV |
2637 | status = vxge_hw_vpath_check_leak(ring->handle); |
2638 | ||
2639 | /* Did it receive any packets last time? */ | |
2640 | if ((VXGE_HW_FAIL == status) && | |
2641 | (VXGE_HW_FAIL == ring->last_status)) { | |
2642 | ||
2643 | /* schedule vpath reset */ | |
2644 | if (!test_and_set_bit(i, &vdev->vp_reset)) { | |
7adf7d1b | 2645 | vpath = &vdev->vpaths[i]; |
703da5a1 RV |
2646 | |
2647 | /* disable interrupts for this vpath */ | |
2648 | vxge_vpath_intr_disable(vdev, i); | |
2649 | ||
2650 | /* stop the queue for this vpath */ | |
98f45da2 | 2651 | netif_tx_stop_queue(vpath->fifo.txq); |
703da5a1 RV |
2652 | continue; |
2653 | } | |
2654 | } | |
2655 | } | |
62ea0557 | 2656 | ring->stats.prev_rx_frms = rx_frms; |
703da5a1 RV |
2657 | ring->last_status = status; |
2658 | } | |
2659 | ||
2660 | /* Check every millisecond */ | |
2661 | mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000); | |
2662 | } | |
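/* Lockup detection in short: if a ring's rx_frms count is unchanged
 * between two samples and vxge_hw_vpath_check_leak() reports VXGE_HW_FAIL
 * twice in a row, the vpath is presumed stuck - its interrupts and Tx
 * queue are stopped and its bit in vp_reset is set so that
 * vxge_poll_vp_reset() performs the actual reset.
 */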
2663 | ||
c8f44aff MM |
2664 | static netdev_features_t vxge_fix_features(struct net_device *dev, |
2665 | netdev_features_t features) | |
feb990d4 | 2666 | { |
c8f44aff | 2667 | netdev_features_t changed = dev->features ^ features; |
feb990d4 MM |
2668 | |
2669 | /* Enabling RTH requires some of the logic in vxge_device_register and a | |
2670 | * vpath reset. Due to these restrictions, only allow modification | |
2671 | * while the interface is down. | |
2672 | */ | |
2673 | if ((changed & NETIF_F_RXHASH) && netif_running(dev)) | |
2674 | features ^= NETIF_F_RXHASH; | |
2675 | ||
2676 | return features; | |
2677 | } | |
2678 | ||
c8f44aff | 2679 | static int vxge_set_features(struct net_device *dev, netdev_features_t features) |
feb990d4 MM |
2680 | { |
2681 | struct vxgedev *vdev = netdev_priv(dev); | |
c8f44aff | 2682 | netdev_features_t changed = dev->features ^ features; |
feb990d4 MM |
2683 | |
2684 | if (!(changed & NETIF_F_RXHASH)) | |
2685 | return 0; | |
2686 | ||
2687 | /* !netif_running() ensured by vxge_fix_features() */ | |
2688 | ||
2689 | vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH); | |
2690 | if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) { | |
2691 | dev->features = features ^ NETIF_F_RXHASH; | |
2692 | vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH); | |
2693 | return -EIO; | |
2694 | } | |
2695 | ||
2696 | return 0; | |
2697 | } | |
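/* Practical consequence: NETIF_F_RXHASH can only be toggled while the
 * interface is down - vxge_fix_features() silently reverts the bit on a
 * running device - and flipping it costs a reset of every vpath.
 */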
2698 | ||
703da5a1 RV |
2699 | /** |
2700 | * vxge_open | |
2701 | * @dev: pointer to the device structure. | |
2702 | * | |
2703 | * This function is the open entry point of the driver. It mainly calls a | |
2704 | * function to allocate Rx buffers and inserts them into the buffer | |
2705 | * descriptors and then enables the Rx part of the NIC. | |
2706 | * Return value: '0' on success and an appropriate (-)ve integer as | |
2707 | * defined in errno.h file on failure. | |
2708 | */ | |
528f7272 | 2709 | static int vxge_open(struct net_device *dev) |
703da5a1 RV |
2710 | { |
2711 | enum vxge_hw_status status; | |
2712 | struct vxgedev *vdev; | |
2713 | struct __vxge_hw_device *hldev; | |
7adf7d1b | 2714 | struct vxge_vpath *vpath; |
703da5a1 RV |
2715 | int ret = 0; |
2716 | int i; | |
2717 | u64 val64, function_mode; | |
528f7272 | 2718 | |
703da5a1 RV |
2719 | vxge_debug_entryexit(VXGE_TRACE, |
2720 | "%s: %s:%d", dev->name, __func__, __LINE__); | |
2721 | ||
5f54cebb | 2722 | vdev = netdev_priv(dev); |
d8ee7071 | 2723 | hldev = pci_get_drvdata(vdev->pdev); |
703da5a1 RV |
2724 | function_mode = vdev->config.device_hw_info.function_mode; |
2725 | ||
2726 | /* make sure you have link off by default every time Nic is | |
2727 | * initialized */ | |
2728 | netif_carrier_off(dev); | |
2729 | ||
703da5a1 RV |
2730 | /* Open VPATHs */ |
2731 | status = vxge_open_vpaths(vdev); | |
2732 | if (status != VXGE_HW_OK) { | |
2733 | vxge_debug_init(VXGE_ERR, | |
2734 | "%s: fatal: Vpath open failed", vdev->ndev->name); | |
2735 | ret = -EPERM; | |
2736 | goto out0; | |
2737 | } | |
2738 | ||
2739 | vdev->mtu = dev->mtu; | |
2740 | ||
2741 | status = vxge_add_isr(vdev); | |
2742 | if (status != VXGE_HW_OK) { | |
2743 | vxge_debug_init(VXGE_ERR, | |
2744 | "%s: fatal: ISR add failed", dev->name); | |
2745 | ret = -EPERM; | |
2746 | goto out1; | |
2747 | } | |
2748 | ||
703da5a1 RV |
2749 | if (vdev->config.intr_type != MSI_X) { |
2750 | netif_napi_add(dev, &vdev->napi, vxge_poll_inta, | |
2751 | vdev->config.napi_weight); | |
2752 | napi_enable(&vdev->napi); | |
7adf7d1b JM |
2753 | for (i = 0; i < vdev->no_of_vpath; i++) { |
2754 | vpath = &vdev->vpaths[i]; | |
2755 | vpath->ring.napi_p = &vdev->napi; | |
2756 | } | |
703da5a1 RV |
2757 | } else { |
2758 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
7adf7d1b JM |
2759 | vpath = &vdev->vpaths[i]; |
2760 | netif_napi_add(dev, &vpath->ring.napi, | |
703da5a1 | 2761 | vxge_poll_msix, vdev->config.napi_weight); |
7adf7d1b JM |
2762 | napi_enable(&vpath->ring.napi); |
2763 | vpath->ring.napi_p = &vpath->ring.napi; | |
703da5a1 RV |
2764 | } |
2765 | } | |
2766 | ||
2767 | /* configure RTH */ | |
2768 | if (vdev->config.rth_steering) { | |
2769 | status = vxge_rth_configure(vdev); | |
2770 | if (status != VXGE_HW_OK) { | |
2771 | vxge_debug_init(VXGE_ERR, | |
2772 | "%s: fatal: RTH configuration failed", | |
2773 | dev->name); | |
2774 | ret = -EPERM; | |
2775 | goto out2; | |
2776 | } | |
2777 | } | |
47f01db4 JM |
2778 | printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name, |
2779 | hldev->config.rth_en ? "enabled" : "disabled"); | |
703da5a1 RV |
2780 | |
2781 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
7adf7d1b JM |
2782 | vpath = &vdev->vpaths[i]; |
2783 | ||
703da5a1 | 2784 | /* set initial mtu before enabling the device */ |
7adf7d1b | 2785 | status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu); |
703da5a1 RV |
2786 | if (status != VXGE_HW_OK) { |
2787 | vxge_debug_init(VXGE_ERR, | |
2788 | "%s: fatal: can not set new MTU", dev->name); | |
2789 | ret = -EPERM; | |
2790 | goto out2; | |
2791 | } | |
2792 | } | |
2793 | ||
2794 | VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev); | |
2795 | vxge_debug_init(vdev->level_trace, | |
2796 | "%s: MTU is %d", vdev->ndev->name, vdev->mtu); | |
2797 | VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev); | |
2798 | ||
7adf7d1b JM |
2799 | /* Restore the DA, VID table and also multicast and promiscuous mode |
2800 | * states | |
2801 | */ | |
2802 | if (vdev->all_multi_flg) { | |
2803 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
2804 | vpath = &vdev->vpaths[i]; | |
2805 | vxge_restore_vpath_mac_addr(vpath); | |
2806 | vxge_restore_vpath_vid_table(vpath); | |
2807 | ||
2808 | status = vxge_hw_vpath_mcast_enable(vpath->handle); | |
2809 | if (status != VXGE_HW_OK) | |
2810 | vxge_debug_init(VXGE_ERR, | |
2811 | "%s:%d Enabling multicast failed", | |
2812 | __func__, __LINE__); | |
2813 | } | |
703da5a1 RV |
2814 | } |
2815 | ||
2816 | /* Enable vpaths to sniff all unicast/multicast traffic that is not | |
25985edc | 2817 | * addressed to them. We allow promiscuous mode for the PF only | |
703da5a1 RV |
2818 | */ |
2819 | ||
2820 | val64 = 0; | |
2821 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) | |
2822 | val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i); | |
2823 | ||
2824 | vxge_hw_mgmt_reg_write(vdev->devh, | |
2825 | vxge_hw_mgmt_reg_type_mrpcim, | |
2826 | 0, | |
2827 | (ulong)offsetof(struct vxge_hw_mrpcim_reg, | |
2828 | rxmac_authorize_all_addr), | |
2829 | val64); | |
2830 | ||
2831 | vxge_hw_mgmt_reg_write(vdev->devh, | |
2832 | vxge_hw_mgmt_reg_type_mrpcim, | |
2833 | 0, | |
2834 | (ulong)offsetof(struct vxge_hw_mrpcim_reg, | |
2835 | rxmac_authorize_all_vid), | |
2836 | val64); | |
2837 | ||
2838 | vxge_set_multicast(dev); | |
2839 | ||
2840 | /* Enabling Bcast and mcast for all vpath */ | |
2841 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
7adf7d1b JM |
2842 | vpath = &vdev->vpaths[i]; |
2843 | status = vxge_hw_vpath_bcast_enable(vpath->handle); | |
703da5a1 RV |
2844 | if (status != VXGE_HW_OK) |
2845 | vxge_debug_init(VXGE_ERR, | |
2846 | "%s : Cannot enable bcast for vpath " | |
2847 | "id %d", dev->name, i); | |
2848 | if (vdev->config.addr_learn_en) { | |
7adf7d1b | 2849 | status = vxge_hw_vpath_mcast_enable(vpath->handle); |
703da5a1 RV |
2850 | if (status != VXGE_HW_OK) |
2851 | vxge_debug_init(VXGE_ERR, | |
2852 | "%s : Cannot enable mcast for vpath " | |
2853 | "id %d", dev->name, i); | |
2854 | } | |
2855 | } | |
2856 | ||
2857 | vxge_hw_device_setpause_data(vdev->devh, 0, | |
2858 | vdev->config.tx_pause_enable, | |
2859 | vdev->config.rx_pause_enable); | |
2860 | ||
2861 | if (vdev->vp_reset_timer.function == NULL) | |
044a3813 JP |
2862 | vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev, |
2863 | HZ / 2); | |
703da5a1 | 2864 | |
e7935c96 JM |
2865 | /* There is no need to check for RxD leak and RxD lookup on Titan1A */ |
2866 | if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL) | |
044a3813 | 2867 | vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev, |
e7935c96 | 2868 | HZ / 2); |
703da5a1 RV |
2869 | |
2870 | set_bit(__VXGE_STATE_CARD_UP, &vdev->state); | |
2871 | ||
2872 | smp_wmb(); | |
2873 | ||
2874 | if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) { | |
2875 | netif_carrier_on(vdev->ndev); | |
75f5e1c6 | 2876 | netdev_notice(vdev->ndev, "Link Up\n"); |
703da5a1 RV |
2877 | vdev->stats.link_up++; |
2878 | } | |
2879 | ||
2880 | vxge_hw_device_intr_enable(vdev->devh); | |
2881 | ||
2882 | smp_wmb(); | |
2883 | ||
2884 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
7adf7d1b JM |
2885 | vpath = &vdev->vpaths[i]; |
2886 | ||
2887 | vxge_hw_vpath_enable(vpath->handle); | |
703da5a1 | 2888 | smp_wmb(); |
7adf7d1b | 2889 | vxge_hw_vpath_rx_doorbell_init(vpath->handle); |
703da5a1 RV |
2890 | } |
2891 | ||
d03848e0 | 2892 | netif_tx_start_all_queues(vdev->ndev); |
16fded7d JM |
2893 | |
2894 | /* configure CI */ | |
2895 | vxge_config_ci_for_tti_rti(vdev); | |
2896 | ||
703da5a1 RV |
2897 | goto out0; |
2898 | ||
2899 | out2: | |
2900 | vxge_rem_isr(vdev); | |
2901 | ||
2902 | /* Disable napi */ | |
2903 | if (vdev->config.intr_type != MSI_X) | |
2904 | napi_disable(&vdev->napi); | |
2905 | else { | |
2906 | for (i = 0; i < vdev->no_of_vpath; i++) | |
2907 | napi_disable(&vdev->vpaths[i].ring.napi); | |
2908 | } | |
2909 | ||
2910 | out1: | |
2911 | vxge_close_vpaths(vdev, 0); | |
2912 | out0: | |
2913 | vxge_debug_entryexit(VXGE_TRACE, | |
2914 | "%s: %s:%d Exiting...", | |
2915 | dev->name, __func__, __LINE__); | |
2916 | return ret; | |
2917 | } | |
2918 | ||
25985edc | 2919 | /* Loop through the mac address list and delete all the entries */ |
42821a5b | 2920 | static void vxge_free_mac_add_list(struct vxge_vpath *vpath) |
703da5a1 RV |
2921 | { |
2922 | ||
2923 | struct list_head *entry, *next; | |
2924 | if (list_empty(&vpath->mac_addr_list)) | |
2925 | return; | |
2926 | ||
2927 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { | |
2928 | list_del(entry); | |
2929 | kfree((struct vxge_mac_addrs *)entry); | |
2930 | } | |
2931 | } | |
2932 | ||
2933 | static void vxge_napi_del_all(struct vxgedev *vdev) | |
2934 | { | |
2935 | int i; | |
2936 | if (vdev->config.intr_type != MSI_X) | |
2937 | netif_napi_del(&vdev->napi); | |
2938 | else { | |
2939 | for (i = 0; i < vdev->no_of_vpath; i++) | |
2940 | netif_napi_del(&vdev->vpaths[i].ring.napi); | |
2941 | } | |
703da5a1 RV |
2942 | } |
2943 | ||
42821a5b | 2944 | static int do_vxge_close(struct net_device *dev, int do_io) |
703da5a1 RV |
2945 | { |
2946 | enum vxge_hw_status status; | |
2947 | struct vxgedev *vdev; | |
2948 | struct __vxge_hw_device *hldev; | |
2949 | int i; | |
2950 | u64 val64, vpath_vector; | |
2951 | vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", | |
2952 | dev->name, __func__, __LINE__); | |
2953 | ||
5f54cebb | 2954 | vdev = netdev_priv(dev); |
d8ee7071 | 2955 | hldev = pci_get_drvdata(vdev->pdev); |
703da5a1 | 2956 | |
bd9ee680 SH |
2957 | if (unlikely(!is_vxge_card_up(vdev))) |
2958 | return 0; | |
2959 | ||
703da5a1 RV |
2960 | /* If vxge_handle_crit_err task is executing, |
2961 | * wait till it completes. */ | |
2962 | while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) | |
2963 | msleep(50); | |
2964 | ||
703da5a1 RV |
2965 | if (do_io) { |
2966 | /* Put the vpath back in normal mode */ | |
2967 | vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id); | |
2968 | status = vxge_hw_mgmt_reg_read(vdev->devh, | |
2969 | vxge_hw_mgmt_reg_type_mrpcim, | |
2970 | 0, | |
2971 | (ulong)offsetof( | |
2972 | struct vxge_hw_mrpcim_reg, | |
2973 | rts_mgr_cbasin_cfg), | |
2974 | &val64); | |
703da5a1 RV |
2975 | if (status == VXGE_HW_OK) { |
2976 | val64 &= ~vpath_vector; | |
2977 | status = vxge_hw_mgmt_reg_write(vdev->devh, | |
2978 | vxge_hw_mgmt_reg_type_mrpcim, | |
2979 | 0, | |
2980 | (ulong)offsetof( | |
2981 | struct vxge_hw_mrpcim_reg, | |
2982 | rts_mgr_cbasin_cfg), | |
2983 | val64); | |
2984 | } | |
2985 | ||
25985edc | 2986 | /* Remove the function 0 from promiscuous mode */ |
703da5a1 RV |
2987 | vxge_hw_mgmt_reg_write(vdev->devh, |
2988 | vxge_hw_mgmt_reg_type_mrpcim, | |
2989 | 0, | |
2990 | (ulong)offsetof(struct vxge_hw_mrpcim_reg, | |
2991 | rxmac_authorize_all_addr), | |
2992 | 0); | |
2993 | ||
2994 | vxge_hw_mgmt_reg_write(vdev->devh, | |
2995 | vxge_hw_mgmt_reg_type_mrpcim, | |
2996 | 0, | |
2997 | (ulong)offsetof(struct vxge_hw_mrpcim_reg, | |
2998 | rxmac_authorize_all_vid), | |
2999 | 0); | |
3000 | ||
3001 | smp_wmb(); | |
3002 | } | |
e7935c96 JM |
3003 | |
3004 | if (vdev->titan1) | |
3005 | del_timer_sync(&vdev->vp_lockup_timer); | |
703da5a1 RV |
3006 | |
3007 | del_timer_sync(&vdev->vp_reset_timer); | |
3008 | ||
4d2a5b40 JM |
3009 | if (do_io) |
3010 | vxge_hw_device_wait_receive_idle(hldev); | |
3011 | ||
3012 | clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); | |
3013 | ||
703da5a1 RV |
3014 | /* Disable napi */ |
3015 | if (vdev->config.intr_type != MSI_X) | |
3016 | napi_disable(&vdev->napi); | |
3017 | else { | |
3018 | for (i = 0; i < vdev->no_of_vpath; i++) | |
3019 | napi_disable(&vdev->vpaths[i].ring.napi); | |
3020 | } | |
3021 | ||
3022 | netif_carrier_off(vdev->ndev); | |
75f5e1c6 | 3023 | netdev_notice(vdev->ndev, "Link Down\n"); |
d03848e0 | 3024 | netif_tx_stop_all_queues(vdev->ndev); |
703da5a1 RV |
3025 | |
3026 | /* Note that at this point xmit() is stopped by upper layer */ | |
3027 | if (do_io) | |
3028 | vxge_hw_device_intr_disable(vdev->devh); | |
3029 | ||
703da5a1 RV |
3030 | vxge_rem_isr(vdev); |
3031 | ||
3032 | vxge_napi_del_all(vdev); | |
3033 | ||
3034 | if (do_io) | |
3035 | vxge_reset_all_vpaths(vdev); | |
3036 | ||
3037 | vxge_close_vpaths(vdev, 0); | |
3038 | ||
3039 | vxge_debug_entryexit(VXGE_TRACE, | |
3040 | "%s: %s:%d Exiting...", dev->name, __func__, __LINE__); | |
3041 | ||
703da5a1 RV |
3042 | clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); |
3043 | ||
3044 | return 0; | |
3045 | } | |
3046 | ||
3047 | /** | |
3048 | * vxge_close | |
3049 | * @dev: device pointer. | |
3050 | * | |
3051 | * This is the stop entry point of the driver. It needs to undo exactly | |
3052 | * whatever was done by the open entry point, thus it's usually referred to | |
3053 | * as the close function. Among other things, this function mainly stops the |
3054 | * Rx side of the NIC and frees all the Rx buffers in the Rx rings. | |
3055 | * Return value: '0' on success and an appropriate negative errno |
3056 | * value (as defined in errno.h) on failure. |
3057 | */ | |
528f7272 | 3058 | static int vxge_close(struct net_device *dev) |
703da5a1 RV |
3059 | { |
3060 | do_vxge_close(dev, 1); | |
3061 | return 0; | |
3062 | } | |
3063 | ||
3064 | /** | |
3065 | * vxge_change_mtu | |
3066 | * @dev: net device pointer. | |
3067 | * @new_mtu :the new MTU size for the device. | |
3068 | * | |
3069 | * A driver entry point to change MTU size for the device. Before changing | |
3070 | * the MTU the device must be stopped. | |
3071 | */ | |
3072 | static int vxge_change_mtu(struct net_device *dev, int new_mtu) | |
3073 | { | |
3074 | struct vxgedev *vdev = netdev_priv(dev); | |
3075 | ||
3076 | vxge_debug_entryexit(vdev->level_trace, | |
3077 | "%s:%d", __func__, __LINE__); | |
3078 | if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) { | |
3079 | vxge_debug_init(vdev->level_err, | |
3080 | "%s: mtu size is invalid", dev->name); | |
3081 | return -EPERM; | |
3082 | } | |
3083 | ||
3084 | /* check if device is down already */ | |
3085 | if (unlikely(!is_vxge_card_up(vdev))) { | |
3086 | /* just store the new value; it will be applied on the next open() */ |
3087 | dev->mtu = new_mtu; | |
3088 | vxge_debug_init(vdev->level_err, | |
3089 | "%s", "device is down on MTU change"); | |
3090 | return 0; | |
3091 | } | |
3092 | ||
3093 | vxge_debug_init(vdev->level_trace, | |
3094 | "trying to apply new MTU %d", new_mtu); | |
3095 | ||
3096 | if (vxge_close(dev)) | |
3097 | return -EIO; | |
3098 | ||
3099 | dev->mtu = new_mtu; | |
3100 | vdev->mtu = new_mtu; | |
3101 | ||
3102 | if (vxge_open(dev)) | |
3103 | return -EIO; | |
3104 | ||
3105 | vxge_debug_init(vdev->level_trace, | |
3106 | "%s: MTU changed to %d", vdev->ndev->name, new_mtu); | |
3107 | ||
3108 | vxge_debug_entryexit(vdev->level_trace, | |
3109 | "%s:%d Exiting...", __func__, __LINE__); | |
3110 | ||
3111 | return 0; | |
3112 | } | |
3113 | ||
3114 | /** | |
dd57f970 | 3115 | * vxge_get_stats64 |
703da5a1 | 3116 | * @dev: pointer to the device structure |
dd57f970 | 3117 | * @stats: pointer to struct rtnl_link_stats64 |
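 *
 * Aggregates the per-vpath Rx/Tx counters into @net_stats; the
 * u64_stats_fetch_begin/retry loops keep the 64-bit counter reads
 * consistent while the rings update them concurrently.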
703da5a1 | 3118 | * |
703da5a1 | 3119 | */ |
dd57f970 ED |
3120 | static struct rtnl_link_stats64 * |
3121 | vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) | |
703da5a1 | 3122 | { |
dd57f970 | 3123 | struct vxgedev *vdev = netdev_priv(dev); |
703da5a1 RV |
3124 | int k; |
3125 | ||
dd57f970 | 3126 | /* net_stats already zeroed by caller */ |
703da5a1 | 3127 | for (k = 0; k < vdev->no_of_vpath; k++) { |
62ea0557 | 3128 | struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats; |
3129 | struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats; | |
3130 | unsigned int start; | |
3131 | u64 packets, bytes, multicast; | |
3132 | ||
3133 | do { | |
3134 | start = u64_stats_fetch_begin(&rxstats->syncp); | |
3135 | ||
3136 | packets = rxstats->rx_frms; | |
3137 | multicast = rxstats->rx_mcast; | |
3138 | bytes = rxstats->rx_bytes; | |
3139 | } while (u64_stats_fetch_retry(&rxstats->syncp, start)); | |
3140 | ||
3141 | net_stats->rx_packets += packets; | |
3142 | net_stats->rx_bytes += bytes; | |
3143 | net_stats->multicast += multicast; | |
3144 | ||
3145 | net_stats->rx_errors += rxstats->rx_errors; | |
3146 | net_stats->rx_dropped += rxstats->rx_dropped; | |
3147 | ||
3148 | do { | |
3149 | start = u64_stats_fetch_begin(&txstats->syncp); | |
3150 | ||
3151 | packets = txstats->tx_frms; | |
3152 | bytes = txstats->tx_bytes; | |
3153 | } while (u64_stats_fetch_retry(&txstats->syncp, start)); | |
3154 | ||
3155 | net_stats->tx_packets += packets; | |
3156 | net_stats->tx_bytes += bytes; | |
3157 | net_stats->tx_errors += txstats->tx_errors; | |
703da5a1 RV |
3158 | } |
3159 | ||
3160 | return net_stats; | |
3161 | } | |
3162 | ||
cd883a79 | 3163 | static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh) |
b81b3733 JM |
3164 | { |
3165 | enum vxge_hw_status status; | |
3166 | u64 val64; | |
3167 | ||
3168 | /* Timestamp is passed to the driver via the FCS, therefore we | |
3169 | * must disable the FCS stripping by the adapter. Since this is | |
3170 | * required for the driver to load (due to a hardware bug), | |
3171 | * there is no need to do anything special here. | |
3172 | */ | |
cd883a79 JM |
3173 | val64 = VXGE_HW_XMAC_TIMESTAMP_EN | |
3174 | VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) | | |
3175 | VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0); | |
b81b3733 | 3176 | |
cd883a79 | 3177 | status = vxge_hw_mgmt_reg_write(devh, |
b81b3733 JM |
3178 | vxge_hw_mgmt_reg_type_mrpcim, |
3179 | 0, | |
3180 | offsetof(struct vxge_hw_mrpcim_reg, | |
3181 | xmac_timestamp), | |
3182 | val64); | |
cd883a79 JM |
3183 | vxge_hw_device_flush_io(devh); |
3184 | devh->config.hwts_en = VXGE_HW_HWTS_ENABLE; | |
b81b3733 JM |
3185 | return status; |
3186 | } | |
3187 | ||
3188 | static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data) | |
3189 | { | |
3190 | struct hwtstamp_config config; | |
b81b3733 JM |
3191 | int i; |
3192 | ||
3193 | if (copy_from_user(&config, data, sizeof(config))) | |
3194 | return -EFAULT; | |
3195 | ||
3196 | /* reserved for future extensions */ | |
3197 | if (config.flags) | |
3198 | return -EINVAL; | |
3199 | ||
3200 | /* Transmit HW Timestamp not supported */ | |
3201 | switch (config.tx_type) { | |
3202 | case HWTSTAMP_TX_OFF: | |
3203 | break; | |
3204 | case HWTSTAMP_TX_ON: | |
3205 | default: | |
3206 | return -ERANGE; | |
3207 | } | |
3208 | ||
3209 | switch (config.rx_filter) { | |
3210 | case HWTSTAMP_FILTER_NONE: | |
b81b3733 JM |
3211 | vdev->rx_hwts = 0; |
3212 | config.rx_filter = HWTSTAMP_FILTER_NONE; | |
3213 | break; | |
3214 | ||
3215 | case HWTSTAMP_FILTER_ALL: | |
3216 | case HWTSTAMP_FILTER_SOME: | |
3217 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | |
3218 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | |
3219 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | |
3220 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: | |
3221 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | |
3222 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | |
3223 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: | |
3224 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | |
3225 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: | |
3226 | case HWTSTAMP_FILTER_PTP_V2_EVENT: | |
3227 | case HWTSTAMP_FILTER_PTP_V2_SYNC: | |
3228 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: | |
cd883a79 | 3229 | if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE) |
b81b3733 JM |
3230 | return -EFAULT; |
3231 | ||
3232 | vdev->rx_hwts = 1; | |
3233 | config.rx_filter = HWTSTAMP_FILTER_ALL; | |
3234 | break; | |
3235 | ||
3236 | default: | |
3237 | return -ERANGE; | |
3238 | } | |
3239 | ||
3240 | for (i = 0; i < vdev->no_of_vpath; i++) | |
3241 | vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts; | |
3242 | ||
3243 | if (copy_to_user(data, &config, sizeof(config))) | |
3244 | return -EFAULT; | |
3245 | ||
3246 | return 0; | |
3247 | } | |
3248 | ||
703da5a1 RV |
3249 | /** |
3250 | * vxge_ioctl | |
3251 | * @dev: Device pointer. | |
3252 | * @rq: An IOCTL-specific structure that can contain a pointer to |
3253 | * a proprietary structure used to pass information to the driver. | |
3254 | * @cmd: This is used to distinguish between the different commands that | |
3255 | * can be passed to the IOCTL functions. | |
3256 | * | |
3257 | * Entry point for the Ioctl. | |
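 *
 * Only SIOCSHWTSTAMP is handled here. Userspace usage sketch
 * (hypothetical application code, not part of this driver):
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *	cfg.tx_type = HWTSTAMP_TX_OFF;		(Tx timestamping is unsupported)
 *	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
 *	strcpy(ifr.ifr_name, "ethN");
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Any PTP rx_filter request is coerced to HWTSTAMP_FILTER_ALL by
 * vxge_hwtstamp_ioctl() above.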
3258 | */ | |
3259 | static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
3260 | { | |
b81b3733 JM |
3261 | struct vxgedev *vdev = netdev_priv(dev); |
3262 | int ret; | |
3263 | ||
3264 | switch (cmd) { | |
3265 | case SIOCSHWTSTAMP: | |
3266 | ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data); | |
3267 | if (ret) | |
3268 | return ret; | |
3269 | break; | |
3270 | default: | |
3271 | return -EOPNOTSUPP; | |
3272 | } | |
3273 | ||
3274 | return 0; | |
703da5a1 RV |
3275 | } |
3276 | ||
3277 | /** | |
3278 | * vxge_tx_watchdog | |
3279 | * @dev: pointer to net device structure | |
3280 | * | |
3281 | * Watchdog for transmit side. | |
3282 | * This function is triggered if the Tx Queue is stopped | |
3283 | * for a pre-defined amount of time when the Interface is still up. | |
3284 | */ | |
2e41f644 | 3285 | static void vxge_tx_watchdog(struct net_device *dev) |
703da5a1 RV |
3286 | { |
3287 | struct vxgedev *vdev; | |
3288 | ||
3289 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); | |
3290 | ||
5f54cebb | 3291 | vdev = netdev_priv(dev); |
703da5a1 RV |
3292 | |
3293 | vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; | |
3294 | ||
2e41f644 | 3295 | schedule_work(&vdev->reset_task); |
703da5a1 RV |
3296 | vxge_debug_entryexit(VXGE_TRACE, |
3297 | "%s:%d Exiting...", __func__, __LINE__); | |
3298 | } | |
3299 | ||
703da5a1 RV |
3300 | /** |
3301 | * vxge_vlan_rx_add_vid | |
3302 | * @dev: net device pointer. | |
3303 | * @vid: vid | |
3304 | * | |
3305 | * Add the vlan id to the device's vlan id table |
3306 | */ | |
8e586137 | 3307 | static int |
703da5a1 RV |
3308 | vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) |
3309 | { | |
53515734 | 3310 | struct vxgedev *vdev = netdev_priv(dev); |
703da5a1 RV |
3311 | struct vxge_vpath *vpath; |
3312 | int vp_id; | |
3313 | ||
703da5a1 RV |
3314 | /* Add this vlan id to the vid table on each open vpath */ |
3315 | for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { | |
3316 | vpath = &vdev->vpaths[vp_id]; | |
3317 | if (!vpath->is_open) | |
3318 | continue; | |
3319 | vxge_hw_vpath_vid_add(vpath->handle, vid); | |
3320 | } | |
53515734 | 3321 | set_bit(vid, vdev->active_vlans); |
8e586137 | 3322 | return 0; |
703da5a1 RV |
3323 | } |
3324 | ||
3325 | /** | |
3326 | * vxge_vlan_rx_kill_vid |
3327 | * @dev: net device pointer. | |
3328 | * @vid: vid | |
3329 | * | |
3330 | * Remove the vlan id from the device's vlan id table | |
3331 | */ | |
8e586137 | 3332 | static int |
703da5a1 RV |
3333 | vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) |
3334 | { | |
53515734 | 3335 | struct vxgedev *vdev = netdev_priv(dev); |
703da5a1 RV |
3336 | struct vxge_vpath *vpath; |
3337 | int vp_id; | |
3338 | ||
3339 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); | |
3340 | ||
703da5a1 RV |
3341 | /* Delete this vlan from the vid table */ |
3342 | for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { | |
3343 | vpath = &vdev->vpaths[vp_id]; | |
3344 | if (!vpath->is_open) | |
3345 | continue; | |
3346 | vxge_hw_vpath_vid_delete(vpath->handle, vid); | |
3347 | } | |
3348 | vxge_debug_entryexit(VXGE_TRACE, | |
3349 | "%s:%d Exiting...", __func__, __LINE__); | |
53515734 | 3350 | clear_bit(vid, vdev->active_vlans); |
8e586137 | 3351 | return 0; |
703da5a1 RV |
3352 | } |
3353 | ||
3354 | static const struct net_device_ops vxge_netdev_ops = { | |
3355 | .ndo_open = vxge_open, | |
3356 | .ndo_stop = vxge_close, | |
dd57f970 | 3357 | .ndo_get_stats64 = vxge_get_stats64, |
703da5a1 RV |
3358 | .ndo_start_xmit = vxge_xmit, |
3359 | .ndo_validate_addr = eth_validate_addr, | |
afc4b13d | 3360 | .ndo_set_rx_mode = vxge_set_multicast, |
703da5a1 | 3361 | .ndo_do_ioctl = vxge_ioctl, |
703da5a1 RV |
3362 | .ndo_set_mac_address = vxge_set_mac_addr, |
3363 | .ndo_change_mtu = vxge_change_mtu, | |
feb990d4 MM |
3364 | .ndo_fix_features = vxge_fix_features, |
3365 | .ndo_set_features = vxge_set_features, | |
703da5a1 RV |
3366 | .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, |
3367 | .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, | |
703da5a1 RV |
3368 | .ndo_tx_timeout = vxge_tx_watchdog, |
3369 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
3370 | .ndo_poll_controller = vxge_netpoll, | |
3371 | #endif | |
3372 | }; | |
3373 | ||
42821a5b | 3374 | static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, |
3375 | struct vxge_config *config, | |
3376 | int high_dma, int no_of_vpath, | |
3377 | struct vxgedev **vdev_out) | |
703da5a1 RV |
3378 | { |
3379 | struct net_device *ndev; | |
3380 | enum vxge_hw_status status = VXGE_HW_OK; | |
3381 | struct vxgedev *vdev; | |
98f45da2 | 3382 | int ret = 0, no_of_queue = 1; |
703da5a1 RV |
3383 | u64 stat; |
3384 | ||
3385 | *vdev_out = NULL; | |
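/* With Tx steering enabled, allocate one netdev Tx queue per vpath;
 * otherwise a single Tx queue is used.
 */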
d03848e0 | 3386 | if (config->tx_steering_type) |
703da5a1 RV |
3387 | no_of_queue = no_of_vpath; |
3388 | ||
3389 | ndev = alloc_etherdev_mq(sizeof(struct vxgedev), | |
3390 | no_of_queue); | |
3391 | if (ndev == NULL) { | |
3392 | vxge_debug_init( | |
3393 | vxge_hw_device_trace_level_get(hldev), | |
3394 | "%s : device allocation failed", __func__); | |
3395 | ret = -ENODEV; | |
3396 | goto _out0; | |
3397 | } | |
3398 | ||
3399 | vxge_debug_entryexit( | |
3400 | vxge_hw_device_trace_level_get(hldev), | |
3401 | "%s: %s:%d Entering...", | |
3402 | ndev->name, __func__, __LINE__); | |
3403 | ||
3404 | vdev = netdev_priv(ndev); | |
3405 | memset(vdev, 0, sizeof(struct vxgedev)); | |
3406 | ||
3407 | vdev->ndev = ndev; | |
3408 | vdev->devh = hldev; | |
3409 | vdev->pdev = hldev->pdev; | |
3410 | memcpy(&vdev->config, config, sizeof(struct vxge_config)); | |
b81b3733 | 3411 | vdev->rx_hwts = 0; |
ff938e43 | 3412 | vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION); |
e7935c96 | 3413 | |
703da5a1 RV |
3414 | SET_NETDEV_DEV(ndev, &vdev->pdev->dev); |
3415 | ||
feb990d4 MM |
3416 | ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | |
3417 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
3418 | NETIF_F_TSO | NETIF_F_TSO6 | | |
3419 | NETIF_F_HW_VLAN_TX; | |
3420 | if (vdev->config.rth_steering != NO_STEERING) | |
3421 | ndev->hw_features |= NETIF_F_RXHASH; | |
3422 | ||
3423 | ndev->features |= ndev->hw_features | | |
3424 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; | |
3425 | ||
703da5a1 RV |
3426 | |
3427 | ndev->netdev_ops = &vxge_netdev_ops; | |
3428 | ||
3429 | ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; | |
2e41f644 | 3430 | INIT_WORK(&vdev->reset_task, vxge_reset); |
703da5a1 | 3431 | |
42821a5b | 3432 | vxge_initialize_ethtool_ops(ndev); |
703da5a1 RV |
3433 | |
3434 | /* Allocate memory for vpath */ | |
3435 | vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * | |
3436 | no_of_vpath, GFP_KERNEL); | |
3437 | if (!vdev->vpaths) { | |
3438 | vxge_debug_init(VXGE_ERR, | |
3439 | "%s: vpath memory allocation failed", | |
3440 | vdev->ndev->name); | |
6cca2003 | 3441 | ret = -ENOMEM; |
703da5a1 RV |
3442 | goto _out1; |
3443 | } | |
3444 | ||
703da5a1 RV |
3445 | vxge_debug_init(vxge_hw_device_trace_level_get(hldev), |
3446 | "%s : checksuming enabled", __func__); | |
3447 | ||
3448 | if (high_dma) { | |
3449 | ndev->features |= NETIF_F_HIGHDMA; | |
3450 | vxge_debug_init(vxge_hw_device_trace_level_get(hldev), | |
3451 | "%s : using High DMA", __func__); | |
3452 | } | |
3453 | ||
6cca2003 JM |
3454 | ret = register_netdev(ndev); |
3455 | if (ret) { | |
703da5a1 RV |
3456 | vxge_debug_init(vxge_hw_device_trace_level_get(hldev), |
3457 | "%s: %s : device registration failed!", | |
3458 | ndev->name, __func__); | |
703da5a1 RV |
3459 | goto _out2; |
3460 | } | |
3461 | ||
3462 | /* Set the factory defined MAC address initially */ | |
3463 | ndev->addr_len = ETH_ALEN; | |
3464 | ||
3465 | /* Leave the link state off at this point; when the link change |
3466 | * interrupt arrives, the state will automatically be updated to |
3467 | * the right state. |
3468 | */ | |
3469 | netif_carrier_off(ndev); | |
3470 | ||
3471 | vxge_debug_init(vxge_hw_device_trace_level_get(hldev), | |
3472 | "%s: Ethernet device registered", | |
3473 | ndev->name); | |
3474 | ||
e8ac1756 | 3475 | hldev->ndev = ndev; |
703da5a1 RV |
3476 | *vdev_out = vdev; |
3477 | ||
3478 | /* Resetting the Device stats */ | |
3479 | status = vxge_hw_mrpcim_stats_access( | |
3480 | hldev, | |
3481 | VXGE_HW_STATS_OP_CLEAR_ALL_STATS, | |
3482 | 0, | |
3483 | 0, | |
3484 | &stat); | |
3485 | ||
3486 | if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION) | |
3487 | vxge_debug_init( | |
3488 | vxge_hw_device_trace_level_get(hldev), | |
3489 | "%s: device stats clear returns" | |
3490 | "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name); | |
3491 | ||
3492 | vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev), | |
3493 | "%s: %s:%d Exiting...", | |
3494 | ndev->name, __func__, __LINE__); | |
3495 | ||
3496 | return ret; | |
3497 | _out2: | |
3498 | kfree(vdev->vpaths); | |
3499 | _out1: | |
3500 | free_netdev(ndev); | |
3501 | _out0: | |
3502 | return ret; | |
3503 | } | |
3504 | ||
3505 | /* | |
3506 | * vxge_device_unregister | |
3507 | * | |
3508 | * This function will unregister and free network device | |
3509 | */ | |
2c91308f | 3510 | static void vxge_device_unregister(struct __vxge_hw_device *hldev) |
703da5a1 RV |
3511 | { |
3512 | struct vxgedev *vdev; | |
3513 | struct net_device *dev; | |
3514 | char buf[IFNAMSIZ]; | |
703da5a1 RV |
3515 | |
3516 | dev = hldev->ndev; | |
3517 | vdev = netdev_priv(dev); | |
703da5a1 | 3518 | |
2c91308f JM |
3519 | vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name, |
3520 | __func__, __LINE__); | |
3521 | ||
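/* Keep a copy of the interface name: the debug messages below run
 * after free_netdev() has released the net_device.
 */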
ead5d238 | 3522 | strncpy(buf, dev->name, IFNAMSIZ); |
703da5a1 | 3523 | |
ba27d85c TH |
3524 | flush_work_sync(&vdev->reset_task); |
3525 | ||
703da5a1 RV |
3526 | /* in 2.6 will call stop() if device is up */ |
3527 | unregister_netdev(dev); | |
3528 | ||
6cca2003 JM |
3529 | kfree(vdev->vpaths); |
3530 | ||
3531 | /* we are safe to free it now */ | |
3532 | free_netdev(dev); | |
3533 | ||
2c91308f JM |
3534 | vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", |
3535 | buf); | |
3536 | vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, | |
3537 | __func__, __LINE__); | |
703da5a1 RV |
3538 | } |
3539 | ||
3540 | /* | |
3541 | * vxge_callback_crit_err | |
3542 | * | |
3543 | * This function is called by the alarm handler in interrupt context. | |
3544 | * Driver must analyze it based on the event type. | |
3545 | */ | |
3546 | static void | |
3547 | vxge_callback_crit_err(struct __vxge_hw_device *hldev, | |
3548 | enum vxge_hw_event type, u64 vp_id) | |
3549 | { | |
3550 | struct net_device *dev = hldev->ndev; | |
5f54cebb | 3551 | struct vxgedev *vdev = netdev_priv(dev); |
98f45da2 | 3552 | struct vxge_vpath *vpath = NULL; |
703da5a1 RV |
3553 | int vpath_idx; |
3554 | ||
3555 | vxge_debug_entryexit(vdev->level_trace, | |
3556 | "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); | |
3557 | ||
3558 | /* Note: This event type should be used for device wide | |
3559 | * indications only - Serious errors, Slot freeze and critical errors | |
3560 | */ | |
3561 | vdev->cric_err_event = type; | |
3562 | ||
98f45da2 JM |
3563 | for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { |
3564 | vpath = &vdev->vpaths[vpath_idx]; | |
3565 | if (vpath->device_id == vp_id) | |
703da5a1 | 3566 | break; |
98f45da2 | 3567 | } |
703da5a1 RV |
3568 | |
3569 | if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) { | |
3570 | if (type == VXGE_HW_EVENT_SLOT_FREEZE) { | |
3571 | vxge_debug_init(VXGE_ERR, | |
3572 | "%s: Slot is frozen", vdev->ndev->name); | |
3573 | } else if (type == VXGE_HW_EVENT_SERR) { | |
3574 | vxge_debug_init(VXGE_ERR, | |
3575 | "%s: Encountered Serious Error", | |
3576 | vdev->ndev->name); | |
3577 | } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) | |
3578 | vxge_debug_init(VXGE_ERR, | |
3579 | "%s: Encountered Critical Error", | |
3580 | vdev->ndev->name); | |
3581 | } | |
3582 | ||
3583 | if ((type == VXGE_HW_EVENT_SERR) || | |
3584 | (type == VXGE_HW_EVENT_SLOT_FREEZE)) { | |
3585 | if (unlikely(vdev->exec_mode)) | |
3586 | clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); | |
3587 | } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) { | |
3588 | vxge_hw_device_mask_all(hldev); | |
3589 | if (unlikely(vdev->exec_mode)) | |
3590 | clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); | |
3591 | } else if ((type == VXGE_HW_EVENT_FIFO_ERR) || | |
3592 | (type == VXGE_HW_EVENT_VPATH_ERR)) { | |
3593 | ||
3594 | if (unlikely(vdev->exec_mode)) | |
3595 | clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); | |
3596 | else { | |
3597 | /* check if this vpath is already set for reset */ | |
3598 | if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) { | |
3599 | ||
3600 | /* disable interrupts for this vpath */ | |
3601 | vxge_vpath_intr_disable(vdev, vpath_idx); | |
3602 | ||
3603 | /* stop the queue for this vpath */ | |
98f45da2 | 3604 | netif_tx_stop_queue(vpath->fifo.txq); |
703da5a1 RV |
3605 | } |
3606 | } | |
3607 | } | |
3608 | ||
3609 | vxge_debug_entryexit(vdev->level_trace, | |
3610 | "%s: %s:%d Exiting...", | |
3611 | vdev->ndev->name, __func__, __LINE__); | |
3612 | } | |
3613 | ||
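/* Sanity-check the bw_percentage[] settings. Worked example with
 * hypothetical values: bw_percentage = { 40, 30, 0xFF, ... } leaves
 * 17 - 2 = 15 unspecified vpaths and 100 - 70 = 30% unassigned, so each
 * remaining vpath gets 30 / 15 = 2%. A zero entry, an over-committed
 * sum, or less than 2% left per remaining vpath falls back to equal
 * shares for all vpaths.
 */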
3614 | static void verify_bandwidth(void) | |
3615 | { | |
3616 | int i, band_width, total = 0, equal_priority = 0; | |
3617 | ||
3618 | /* 1. If user enters 0 for some fifo, give equal priority to all */ | |
3619 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | |
3620 | if (bw_percentage[i] == 0) { | |
3621 | equal_priority = 1; | |
3622 | break; | |
3623 | } | |
3624 | } | |
3625 | ||
3626 | if (!equal_priority) { | |
3627 | /* 2. If sum exceeds 100, give equal priority to all */ | |
3628 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | |
3629 | if (bw_percentage[i] == 0xFF) | |
3630 | break; | |
3631 | ||
3632 | total += bw_percentage[i]; | |
3633 | if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) { | |
3634 | equal_priority = 1; | |
3635 | break; | |
3636 | } | |
3637 | } | |
3638 | } | |
3639 | ||
3640 | if (!equal_priority) { | |
3641 | /* Is all the bandwidth consumed? */ | |
3642 | if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) { | |
3643 | if (i < VXGE_HW_MAX_VIRTUAL_PATHS) { | |
3644 | /* Split rest of bw equally among next VPs */ |
3645 | band_width = | |
3646 | (VXGE_HW_VPATH_BANDWIDTH_MAX - total) / | |
3647 | (VXGE_HW_MAX_VIRTUAL_PATHS - i); | |
3648 | if (band_width < 2) /* min of 2% */ | |
3649 | equal_priority = 1; | |
3650 | else { | |
3651 | for (; i < VXGE_HW_MAX_VIRTUAL_PATHS; | |
3652 | i++) | |
3653 | bw_percentage[i] = | |
3654 | band_width; | |
3655 | } | |
3656 | } | |
3657 | } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS) | |
3658 | equal_priority = 1; | |
3659 | } | |
3660 | ||
3661 | if (equal_priority) { | |
3662 | vxge_debug_init(VXGE_ERR, | |
3663 | "%s: Assigning equal bandwidth to all the vpaths", | |
3664 | VXGE_DRIVER_NAME); | |
3665 | bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX / | |
3666 | VXGE_HW_MAX_VIRTUAL_PATHS; | |
3667 | for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) | |
3668 | bw_percentage[i] = bw_percentage[0]; | |
3669 | } | |
703da5a1 RV |
3670 | } |
3671 | ||
3672 | /* | |
3673 | * Vpath configuration | |
3674 | */ | |
3675 | static int __devinit vxge_config_vpaths( | |
3676 | struct vxge_hw_device_config *device_config, | |
3677 | u64 vpath_mask, struct vxge_config *config_param) | |
3678 | { | |
3679 | int i, no_of_vpaths = 0, default_no_vpath = 0, temp; | |
3680 | u32 txdl_size, txdl_per_memblock; | |
3681 | ||
3682 | temp = driver_config->vpath_per_dev; | |
3683 | if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) && | |
3684 | (max_config_dev == VXGE_MAX_CONFIG_DEV)) { | |
3685 | /* No CPUs left to assign. Return a vpath count of zero. */ |
3686 | if (driver_config->g_no_cpus == -1) | |
3687 | return 0; | |
3688 | ||
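/* Default heuristic: use half of the online CPUs as the vpath count
 * (each configured vpath is budgeted two CPUs), then clip it to the
 * vpaths actually present in vpath_mask.
 */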
3689 | if (!driver_config->g_no_cpus) | |
3690 | driver_config->g_no_cpus = num_online_cpus(); | |
3691 | ||
3692 | driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1; | |
3693 | if (!driver_config->vpath_per_dev) | |
3694 | driver_config->vpath_per_dev = 1; | |
3695 | ||
3696 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) | |
3697 | if (!vxge_bVALn(vpath_mask, i, 1)) | |
3698 | continue; | |
3699 | else | |
3700 | default_no_vpath++; | |
3701 | if (default_no_vpath < driver_config->vpath_per_dev) | |
3702 | driver_config->vpath_per_dev = default_no_vpath; | |
3703 | ||
3704 | driver_config->g_no_cpus = driver_config->g_no_cpus - | |
3705 | (driver_config->vpath_per_dev * 2); | |
3706 | if (driver_config->g_no_cpus <= 0) | |
3707 | driver_config->g_no_cpus = -1; | |
3708 | } | |
3709 | ||
3710 | if (driver_config->vpath_per_dev == 1) { | |
3711 | vxge_debug_ll_config(VXGE_TRACE, | |
3712 | "%s: Disable tx and rx steering, " | |
3713 | "as single vpath is configured", VXGE_DRIVER_NAME); | |
3714 | config_param->rth_steering = NO_STEERING; | |
3715 | config_param->tx_steering_type = NO_STEERING; | |
3716 | device_config->rth_en = 0; | |
3717 | } | |
3718 | ||
3719 | /* configure bandwidth */ | |
3720 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) | |
3721 | device_config->vp_config[i].min_bandwidth = bw_percentage[i]; | |
3722 | ||
3723 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | |
3724 | device_config->vp_config[i].vp_id = i; | |
3725 | device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU; | |
3726 | if (no_of_vpaths < driver_config->vpath_per_dev) { | |
3727 | if (!vxge_bVALn(vpath_mask, i, 1)) { | |
3728 | vxge_debug_ll_config(VXGE_TRACE, | |
3729 | "%s: vpath: %d is not available", | |
3730 | VXGE_DRIVER_NAME, i); | |
3731 | continue; | |
3732 | } else { | |
3733 | vxge_debug_ll_config(VXGE_TRACE, | |
3734 | "%s: vpath: %d available", | |
3735 | VXGE_DRIVER_NAME, i); | |
3736 | no_of_vpaths++; | |
3737 | } | |
3738 | } else { | |
3739 | vxge_debug_ll_config(VXGE_TRACE, | |
3740 | "%s: vpath: %d is not configured, " | |
3741 | "max_config_vpath exceeded", | |
3742 | VXGE_DRIVER_NAME, i); | |
3743 | break; | |
3744 | } | |
3745 | ||
3746 | /* Configure Tx fifo's */ | |
3747 | device_config->vp_config[i].fifo.enable = | |
3748 | VXGE_HW_FIFO_ENABLE; | |
3749 | device_config->vp_config[i].fifo.max_frags = | |
5beefb4f | 3750 | MAX_SKB_FRAGS + 1; |
703da5a1 RV |
3751 | device_config->vp_config[i].fifo.memblock_size = |
3752 | VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE; | |
3753 | ||
5beefb4f SH |
3754 | txdl_size = device_config->vp_config[i].fifo.max_frags * |
3755 | sizeof(struct vxge_hw_fifo_txd); | |
703da5a1 RV |
3756 | txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size; |
3757 | ||
3758 | device_config->vp_config[i].fifo.fifo_blocks = | |
3759 | ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1; | |
3760 | ||
3761 | device_config->vp_config[i].fifo.intr = | |
3762 | VXGE_HW_FIFO_QUEUE_INTR_DISABLE; | |
3763 | ||
3764 | /* Configure tti properties */ | |
3765 | device_config->vp_config[i].tti.intr_enable = | |
3766 | VXGE_HW_TIM_INTR_ENABLE; | |
3767 | ||
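/* The *1000/272 scaling below presumably converts the microsecond
 * timer parameters into the device's internal timer units (roughly
 * 272 ns per tick); this is an assumption, not taken from the
 * hardware documentation.
 */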
3768 | device_config->vp_config[i].tti.btimer_val = | |
3769 | (VXGE_TTI_BTIMER_VAL * 1000) / 272; | |
3770 | ||
3771 | device_config->vp_config[i].tti.timer_ac_en = | |
3772 | VXGE_HW_TIM_TIMER_AC_ENABLE; | |
3773 | ||
528f7272 JM |
3774 | /* For msi-x with napi (each vector has a handler of its own) - |
3775 | * Set CI to OFF for all vpaths | |
3776 | */ | |
703da5a1 RV |
3777 | device_config->vp_config[i].tti.timer_ci_en = |
3778 | VXGE_HW_TIM_TIMER_CI_DISABLE; | |
3779 | ||
3780 | device_config->vp_config[i].tti.timer_ri_en = | |
3781 | VXGE_HW_TIM_TIMER_RI_DISABLE; | |
3782 | ||
3783 | device_config->vp_config[i].tti.util_sel = | |
3784 | VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL; | |
3785 | ||
3786 | device_config->vp_config[i].tti.ltimer_val = | |
3787 | (VXGE_TTI_LTIMER_VAL * 1000) / 272; | |
3788 | ||
3789 | device_config->vp_config[i].tti.rtimer_val = | |
3790 | (VXGE_TTI_RTIMER_VAL * 1000) / 272; | |
3791 | ||
3792 | device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A; | |
3793 | device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B; | |
3794 | device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C; | |
3795 | device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A; | |
3796 | device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B; | |
3797 | device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C; | |
3798 | device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D; | |
3799 | ||
3800 | /* Configure Rx rings */ | |
3801 | device_config->vp_config[i].ring.enable = | |
3802 | VXGE_HW_RING_ENABLE; | |
3803 | ||
3804 | device_config->vp_config[i].ring.ring_blocks = | |
3805 | VXGE_HW_DEF_RING_BLOCKS; | |
528f7272 | 3806 | |
703da5a1 RV |
3807 | device_config->vp_config[i].ring.buffer_mode = |
3808 | VXGE_HW_RING_RXD_BUFFER_MODE_1; | |
528f7272 | 3809 | |
703da5a1 RV |
3810 | device_config->vp_config[i].ring.rxds_limit = |
3811 | VXGE_HW_DEF_RING_RXDS_LIMIT; | |
528f7272 | 3812 | |
703da5a1 RV |
3813 | device_config->vp_config[i].ring.scatter_mode = |
3814 | VXGE_HW_RING_SCATTER_MODE_A; | |
3815 | ||
3816 | /* Configure rti properties */ | |
3817 | device_config->vp_config[i].rti.intr_enable = | |
3818 | VXGE_HW_TIM_INTR_ENABLE; | |
3819 | ||
3820 | device_config->vp_config[i].rti.btimer_val = | |
3821 | (VXGE_RTI_BTIMER_VAL * 1000)/272; | |
3822 | ||
3823 | device_config->vp_config[i].rti.timer_ac_en = | |
3824 | VXGE_HW_TIM_TIMER_AC_ENABLE; | |
3825 | ||
3826 | device_config->vp_config[i].rti.timer_ci_en = | |
3827 | VXGE_HW_TIM_TIMER_CI_DISABLE; | |
3828 | ||
3829 | device_config->vp_config[i].rti.timer_ri_en = | |
3830 | VXGE_HW_TIM_TIMER_RI_DISABLE; | |
3831 | ||
3832 | device_config->vp_config[i].rti.util_sel = | |
3833 | VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL; | |
3834 | ||
3835 | device_config->vp_config[i].rti.urange_a = | |
3836 | RTI_RX_URANGE_A; | |
3837 | device_config->vp_config[i].rti.urange_b = | |
3838 | RTI_RX_URANGE_B; | |
3839 | device_config->vp_config[i].rti.urange_c = | |
3840 | RTI_RX_URANGE_C; | |
3841 | device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A; | |
3842 | device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B; | |
3843 | device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C; | |
3844 | device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D; | |
3845 | ||
3846 | device_config->vp_config[i].rti.rtimer_val = | |
3847 | (VXGE_RTI_RTIMER_VAL * 1000) / 272; | |
3848 | ||
3849 | device_config->vp_config[i].rti.ltimer_val = | |
3850 | (VXGE_RTI_LTIMER_VAL * 1000) / 272; | |
3851 | ||
3852 | device_config->vp_config[i].rpa_strip_vlan_tag = | |
3853 | vlan_tag_strip; | |
3854 | } | |
3855 | ||
3856 | driver_config->vpath_per_dev = temp; | |
3857 | return no_of_vpaths; | |
3858 | } | |
3859 | ||
3860 | /* initialize device configurations */ |
3861 | static void __devinit vxge_device_config_init( | |
3862 | struct vxge_hw_device_config *device_config, | |
3863 | int *intr_type) | |
3864 | { | |
3865 | /* Used for CQRQ/SRQ. */ | |
3866 | device_config->dma_blockpool_initial = | |
3867 | VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE; | |
3868 | ||
3869 | device_config->dma_blockpool_max = | |
3870 | VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE; | |
3871 | ||
3872 | if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT) | |
3873 | max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT; | |
3874 | ||
3875 | #ifndef CONFIG_PCI_MSI | |
3876 | vxge_debug_init(VXGE_ERR, | |
3877 | "%s: This Kernel does not support " | |
3878 | "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); | |
3879 | *intr_type = INTA; | |
3880 | #endif | |
3881 | ||
3882 | /* Configure whether MSI-X or INTA (IRQ line). */ |
3883 | switch (*intr_type) { | |
3884 | case INTA: | |
3885 | device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE; | |
3886 | break; | |
3887 | ||
3888 | case MSI_X: | |
16fded7d | 3889 | device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT; |
703da5a1 RV |
3890 | break; |
3891 | } | |
528f7272 | 3892 | |
703da5a1 RV |
3893 | /* Timer period between device poll */ |
3894 | device_config->device_poll_millis = VXGE_TIMER_DELAY; | |
3895 | ||
3896 | /* Configure mac based steering. */ | |
3897 | device_config->rts_mac_en = addr_learn_en; | |
3898 | ||
3899 | /* Configure Vpaths */ | |
3900 | device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT; | |
3901 | ||
3902 | vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", | |
3903 | __func__); | |
703da5a1 RV |
3904 | vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", |
3905 | device_config->intr_mode); | |
3906 | vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", | |
3907 | device_config->device_poll_millis); | |
703da5a1 RV |
3908 | vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", |
3909 | device_config->rth_en); | |
3910 | vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", | |
3911 | device_config->rth_it_type); | |
3912 | } | |
3913 | ||
3914 | static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask) | |
3915 | { | |
3916 | int i; | |
3917 | ||
3918 | vxge_debug_init(VXGE_TRACE, | |
3919 | "%s: %d Vpath(s) opened", | |
3920 | vdev->ndev->name, vdev->no_of_vpath); | |
3921 | ||
3922 | switch (vdev->config.intr_type) { | |
3923 | case INTA: | |
3924 | vxge_debug_init(VXGE_TRACE, | |
3925 | "%s: Interrupt type INTA", vdev->ndev->name); | |
3926 | break; | |
3927 | ||
3928 | case MSI_X: | |
3929 | vxge_debug_init(VXGE_TRACE, | |
3930 | "%s: Interrupt type MSI-X", vdev->ndev->name); | |
3931 | break; | |
3932 | } | |
3933 | ||
3934 | if (vdev->config.rth_steering) { | |
3935 | vxge_debug_init(VXGE_TRACE, | |
3936 | "%s: RTH steering enabled for TCP_IPV4", | |
3937 | vdev->ndev->name); | |
3938 | } else { | |
3939 | vxge_debug_init(VXGE_TRACE, | |
3940 | "%s: RTH steering disabled", vdev->ndev->name); | |
3941 | } | |
3942 | ||
3943 | switch (vdev->config.tx_steering_type) { | |
3944 | case NO_STEERING: | |
3945 | vxge_debug_init(VXGE_TRACE, | |
3946 | "%s: Tx steering disabled", vdev->ndev->name); | |
3947 | break; | |
3948 | case TX_PRIORITY_STEERING: | |
3949 | vxge_debug_init(VXGE_TRACE, | |
3950 | "%s: Unsupported tx steering option", | |
3951 | vdev->ndev->name); | |
3952 | vxge_debug_init(VXGE_TRACE, | |
3953 | "%s: Tx steering disabled", vdev->ndev->name); | |
3954 | vdev->config.tx_steering_type = 0; | |
3955 | break; | |
3956 | case TX_VLAN_STEERING: | |
3957 | vxge_debug_init(VXGE_TRACE, | |
3958 | "%s: Unsupported tx steering option", | |
3959 | vdev->ndev->name); | |
3960 | vxge_debug_init(VXGE_TRACE, | |
3961 | "%s: Tx steering disabled", vdev->ndev->name); | |
3962 | vdev->config.tx_steering_type = 0; | |
3963 | break; | |
3964 | case TX_MULTIQ_STEERING: | |
3965 | vxge_debug_init(VXGE_TRACE, | |
3966 | "%s: Tx multiqueue steering enabled", | |
3967 | vdev->ndev->name); | |
3968 | break; | |
3969 | case TX_PORT_STEERING: | |
3970 | vxge_debug_init(VXGE_TRACE, | |
3971 | "%s: Tx port steering enabled", | |
3972 | vdev->ndev->name); | |
3973 | break; | |
3974 | default: | |
3975 | vxge_debug_init(VXGE_ERR, | |
3976 | "%s: Unsupported tx steering type", | |
3977 | vdev->ndev->name); | |
3978 | vxge_debug_init(VXGE_TRACE, | |
3979 | "%s: Tx steering disabled", vdev->ndev->name); | |
3980 | vdev->config.tx_steering_type = 0; | |
3981 | } | |
3982 | ||
703da5a1 RV |
3983 | if (vdev->config.addr_learn_en) |
3984 | vxge_debug_init(VXGE_TRACE, | |
3985 | "%s: MAC Address learning enabled", vdev->ndev->name); | |
3986 | ||
703da5a1 RV |
3987 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { |
3988 | if (!vxge_bVALn(vpath_mask, i, 1)) | |
3989 | continue; | |
3990 | vxge_debug_ll_config(VXGE_TRACE, | |
3991 | "%s: MTU size - %d", vdev->ndev->name, | |
64699336 | 3992 | ((vdev->devh))-> |
703da5a1 RV |
3993 | config.vp_config[i].mtu); |
3994 | vxge_debug_init(VXGE_TRACE, | |
3995 | "%s: VLAN tag stripping %s", vdev->ndev->name, | |
64699336 | 3996 | ((vdev->devh))-> |
703da5a1 RV |
3997 | config.vp_config[i].rpa_strip_vlan_tag |
3998 | ? "Enabled" : "Disabled"); | |
703da5a1 RV |
3999 | vxge_debug_ll_config(VXGE_TRACE, |
4000 | "%s: Max frags : %d", vdev->ndev->name, | |
64699336 | 4001 | ((vdev->devh))-> |
703da5a1 RV |
4002 | config.vp_config[i].fifo.max_frags); |
4003 | break; | |
4004 | } | |
4005 | } | |
4006 | ||
4007 | #ifdef CONFIG_PM | |
4008 | /** | |
4009 | * vxge_pm_suspend - vxge power management suspend entry point | |
4010 | * | |
4011 | */ | |
4012 | static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state) | |
4013 | { | |
4014 | return -ENOSYS; | |
4015 | } | |
4016 | /** | |
4017 | * vxge_pm_resume - vxge power management resume entry point | |
4018 | * | |
4019 | */ | |
4020 | static int vxge_pm_resume(struct pci_dev *pdev) | |
4021 | { | |
4022 | return -ENOSYS; | |
4023 | } | |
4024 | ||
4025 | #endif | |
4026 | ||
4027 | /** | |
4028 | * vxge_io_error_detected - called when PCI error is detected | |
4029 | * @pdev: Pointer to PCI device | |
4030 | * @state: The current pci connection state | |
4031 | * | |
4032 | * This function is called after a PCI bus error affecting | |
4033 | * this device has been detected. | |
4034 | */ | |
4035 | static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, | |
4036 | pci_channel_state_t state) | |
4037 | { | |
d8ee7071 | 4038 | struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); |
703da5a1 RV |
4039 | struct net_device *netdev = hldev->ndev; |
4040 | ||
4041 | netif_device_detach(netdev); | |
4042 | ||
e33b992d DN |
4043 | if (state == pci_channel_io_perm_failure) |
4044 | return PCI_ERS_RESULT_DISCONNECT; | |
4045 | ||
703da5a1 RV |
4046 | if (netif_running(netdev)) { |
4047 | /* Bring down the card, while avoiding PCI I/O */ | |
4048 | do_vxge_close(netdev, 0); | |
4049 | } | |
4050 | ||
4051 | pci_disable_device(pdev); | |
4052 | ||
4053 | return PCI_ERS_RESULT_NEED_RESET; | |
4054 | } | |
4055 | ||
4056 | /** | |
4057 | * vxge_io_slot_reset - called after the pci bus has been reset. | |
4058 | * @pdev: Pointer to PCI device | |
4059 | * | |
4060 | * Restart the card from scratch, as if from a cold-boot. | |
4061 | * At this point, the card has experienced a hard reset, |
4062 | * followed by fixups by BIOS, and has its config space | |
4063 | * set up identically to what it was at cold boot. | |
4064 | */ | |
4065 | static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) | |
4066 | { | |
d8ee7071 | 4067 | struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); |
703da5a1 RV |
4068 | struct net_device *netdev = hldev->ndev; |
4069 | ||
4070 | struct vxgedev *vdev = netdev_priv(netdev); | |
4071 | ||
4072 | if (pci_enable_device(pdev)) { | |
75f5e1c6 | 4073 | netdev_err(netdev, "Cannot re-enable device after reset\n"); |
703da5a1 RV |
4074 | return PCI_ERS_RESULT_DISCONNECT; |
4075 | } | |
4076 | ||
4077 | pci_set_master(pdev); | |
528f7272 | 4078 | do_vxge_reset(vdev, VXGE_LL_FULL_RESET); |
703da5a1 RV |
4079 | |
4080 | return PCI_ERS_RESULT_RECOVERED; | |
4081 | } | |
4082 | ||
4083 | /** | |
4084 | * vxge_io_resume - called when traffic can start flowing again. | |
4085 | * @pdev: Pointer to PCI device | |
4086 | * | |
4087 | * This callback is called when the error recovery driver tells | |
4088 | * us that it's OK to resume normal operation. |
4089 | */ | |
4090 | static void vxge_io_resume(struct pci_dev *pdev) | |
4091 | { | |
d8ee7071 | 4092 | struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); |
703da5a1 RV |
4093 | struct net_device *netdev = hldev->ndev; |
4094 | ||
4095 | if (netif_running(netdev)) { | |
4096 | if (vxge_open(netdev)) { | |
75f5e1c6 JP |
4097 | netdev_err(netdev, |
4098 | "Can't bring device back up after reset\n"); | |
703da5a1 RV |
4099 | return; |
4100 | } | |
4101 | } | |
4102 | ||
4103 | netif_device_attach(netdev); | |
4104 | } | |
4105 | ||
cb27ec60 SH |
4106 | static inline u32 vxge_get_num_vfs(u64 function_mode) |
4107 | { | |
4108 | u32 num_functions = 0; | |
4109 | ||
4110 | switch (function_mode) { | |
4111 | case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION: | |
4112 | case VXGE_HW_FUNCTION_MODE_SRIOV_8: | |
4113 | num_functions = 8; | |
4114 | break; | |
4115 | case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: | |
4116 | num_functions = 1; | |
4117 | break; | |
4118 | case VXGE_HW_FUNCTION_MODE_SRIOV: | |
4119 | case VXGE_HW_FUNCTION_MODE_MRIOV: | |
4120 | case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17: | |
4121 | num_functions = 17; | |
4122 | break; | |
4123 | case VXGE_HW_FUNCTION_MODE_SRIOV_4: | |
4124 | num_functions = 4; | |
4125 | break; | |
4126 | case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2: | |
4127 | num_functions = 2; | |
4128 | break; | |
4129 | case VXGE_HW_FUNCTION_MODE_MRIOV_8: | |
4130 | num_functions = 8; /* TODO */ | |
4131 | break; | |
4132 | } | |
4133 | return num_functions; | |
4134 | } | |
4135 | ||
e8ac1756 JM |
4136 | int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override) |
4137 | { | |
4138 | struct __vxge_hw_device *hldev = vdev->devh; | |
4139 | u32 maj, min, bld, cmaj, cmin, cbld; | |
4140 | enum vxge_hw_status status; | |
4141 | const struct firmware *fw; | |
4142 | int ret; | |
4143 | ||
4144 | ret = request_firmware(&fw, fw_name, &vdev->pdev->dev); | |
4145 | if (ret) { | |
4146 | vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found", | |
4147 | VXGE_DRIVER_NAME, fw_name); | |
4148 | goto out; | |
4149 | } | |
4150 | ||
4151 | /* Load the new firmware onto the adapter */ | |
4152 | status = vxge_update_fw_image(hldev, fw->data, fw->size); | |
4153 | if (status != VXGE_HW_OK) { | |
4154 | vxge_debug_init(VXGE_ERR, | |
4155 | "%s: FW image download to adapter failed '%s'.", | |
4156 | VXGE_DRIVER_NAME, fw_name); | |
4157 | ret = -EIO; | |
4158 | goto out; | |
4159 | } | |
4160 | ||
4161 | /* Read the version of the new firmware */ | |
4162 | status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld); | |
4163 | if (status != VXGE_HW_OK) { | |
4164 | vxge_debug_init(VXGE_ERR, | |
4165 | "%s: Upgrade read version failed '%s'.", | |
4166 | VXGE_DRIVER_NAME, fw_name); | |
4167 | ret = -EIO; | |
4168 | goto out; | |
4169 | } | |
4170 | ||
4171 | cmaj = vdev->config.device_hw_info.fw_version.major; | |
4172 | cmin = vdev->config.device_hw_info.fw_version.minor; | |
4173 | cbld = vdev->config.device_hw_info.fw_version.build; | |
4174 | /* It's possible the version in /lib/firmware is not the latest version. | |
4175 | * If so, we could get into a loop of trying to upgrade to the latest | |
4176 | * and flashing the older version. | |
4177 | */ | |
4178 | if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) && | |
4179 | !override) { | |
4180 | ret = -EINVAL; | |
4181 | goto out; | |
4182 | } | |
4183 | ||
4184 | printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n", | |
4185 | maj, min, bld); | |
4186 | ||
4187 | /* Flash the adapter with the new firmware */ | |
4188 | status = vxge_hw_flash_fw(hldev); | |
4189 | if (status != VXGE_HW_OK) { | |
4190 | vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.", | |
4191 | VXGE_DRIVER_NAME, fw_name); | |
4192 | ret = -EIO; | |
4193 | goto out; | |
4194 | } | |
4195 | ||
4196 | printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be " | |
4197 | "hard reset before using, thus requiring a system reboot or a " | |
4198 | "hotplug event.\n"); | |
4199 | ||
4200 | out: | |
e84f885e | 4201 | release_firmware(fw); |
e8ac1756 JM |
4202 | return ret; |
4203 | } | |
4204 | ||
4205 | static int vxge_probe_fw_update(struct vxgedev *vdev) | |
4206 | { | |
4207 | u32 maj, min, bld; | |
4208 | int ret, gpxe = 0; | |
4209 | char *fw_name; | |
4210 | ||
4211 | maj = vdev->config.device_hw_info.fw_version.major; | |
4212 | min = vdev->config.device_hw_info.fw_version.minor; | |
4213 | bld = vdev->config.device_hw_info.fw_version.build; | |
4214 | ||
4215 | if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER) | |
4216 | return 0; | |
4217 | ||
4218 | /* Ignore the build number when determining if the current firmware is | |
4219 | * "too new" to load the driver | |
4220 | */ | |
4221 | if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) { | |
4222 | vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known " | |
4223 | "version, unable to load driver\n", | |
4224 | VXGE_DRIVER_NAME); | |
4225 | return -EINVAL; | |
4226 | } | |
4227 | ||
4228 | /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to | |
4229 | * work with this driver. | |
4230 | */ | |
4231 | if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) { | |
4232 | vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be " | |
4233 | "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld); | |
4234 | return -EINVAL; | |
4235 | } | |
4236 | ||
4237 | /* No firmware file was specified; determine whether a gPXE image is present */ |
4238 | if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) { | |
4239 | int i; | |
4240 | for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) | |
4241 | if (vdev->devh->eprom_versions[i]) { | |
4242 | gpxe = 1; | |
4243 | break; | |
4244 | } | |
4245 | } | |
4246 | if (gpxe) | |
4247 | fw_name = "vxge/X3fw-pxe.ncf"; | |
4248 | else | |
4249 | fw_name = "vxge/X3fw.ncf"; | |
4250 | ||
4251 | ret = vxge_fw_upgrade(vdev, fw_name, 0); | |
4252 | /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on | |
4253 | * probe, so ignore them | |
4254 | */ | |
4255 | if (ret != -EINVAL && ret != -ENOENT) | |
4256 | return -EIO; | |
4257 | else | |
4258 | ret = 0; | |
4259 | ||
4260 | if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) > | |
4261 | VXGE_FW_VER(maj, min, 0)) { | |
4262 | vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to" | |
4263 | " be used with this driver.\n" | |
4264 | "Please get the latest version from " | |
4265 | "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE", | |
4266 | VXGE_DRIVER_NAME, maj, min, bld); | |
4267 | return -EINVAL; | |
4268 | } | |
4269 | ||
4270 | return ret; | |
4271 | } | |
4272 | ||
c92bf70d JM |
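/* Report whether SR-IOV VFs are already enabled on this device (the
 * VF Enable bit is set in the PCIe SR-IOV capability), e.g. by
 * firmware or a previous driver instance.
 */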
4273 | static int __devinit is_sriov_initialized(struct pci_dev *pdev) |
4274 | { | |
4275 | int pos; | |
4276 | u16 ctrl; | |
4277 | ||
4278 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); | |
4279 | if (pos) { | |
4280 | pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl); | |
4281 | if (ctrl & PCI_SRIOV_CTRL_VFE) | |
4282 | return 1; | |
4283 | } | |
4284 | return 0; | |
4285 | } | |
4286 | ||
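/* Callbacks the HW/common layer invokes on link state changes and
 * critical device errors.
 */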
956a2066 | 4287 | static const struct vxge_hw_uld_cbs vxge_callbacks = { |
4288 | .link_up = vxge_callback_link_up, | |
4289 | .link_down = vxge_callback_link_down, | |
4290 | .crit_err = vxge_callback_crit_err, | |
4291 | }; | |
4292 | ||
703da5a1 RV |
4293 | /** |
4294 | * vxge_probe | |
4295 | * @pdev : structure containing the PCI related information of the device. | |
4296 | * @pre: List of PCI devices supported by the driver listed in vxge_id_table. | |
4297 | * Description: | |
4298 | * This function is called when a new PCI device gets detected and initializes | |
4299 | * it. | |
4300 | * Return value: | |
4301 | * returns 0 on success and negative on failure. | |
4302 | * | |
4303 | */ | |
4304 | static int __devinit | |
4305 | vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) | |
4306 | { | |
2c91308f | 4307 | struct __vxge_hw_device *hldev; |
703da5a1 RV |
4308 | enum vxge_hw_status status; |
4309 | int ret; | |
4310 | int high_dma = 0; | |
4311 | u64 vpath_mask = 0; | |
4312 | struct vxgedev *vdev; | |
7dad171c | 4313 | struct vxge_config *ll_config = NULL; |
703da5a1 RV |
4314 | struct vxge_hw_device_config *device_config = NULL; |
4315 | struct vxge_hw_device_attr attr; | |
4316 | int i, j, no_of_vpath = 0, max_vpath_supported = 0; | |
4317 | u8 *macaddr; | |
4318 | struct vxge_mac_addrs *entry; | |
4319 | static int bus = -1, device = -1; | |
cb27ec60 | 4320 | u32 host_type; |
703da5a1 | 4321 | u8 new_device = 0; |
cb27ec60 SH |
4322 | enum vxge_hw_status is_privileged; |
4323 | u32 function_mode; | |
4324 | u32 num_vfs = 0; | |
703da5a1 RV |
4325 | |
4326 | vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); | |
4327 | attr.pdev = pdev; | |
4328 | ||
cb27ec60 | 4329 | /* In SRIOV-17 mode, functions of the same adapter |
528f7272 JM |
4330 | * can be deployed on different buses |
4331 | */ | |
4332 | if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) && | |
4333 | !pdev->is_virtfn) | |
703da5a1 RV |
4334 | new_device = 1; |
4335 | ||
4336 | bus = pdev->bus->number; | |
4337 | device = PCI_SLOT(pdev->devfn); | |
4338 | ||
4339 | if (new_device) { | |
4340 | if (driver_config->config_dev_cnt && | |
4341 | (driver_config->config_dev_cnt != | |
4342 | driver_config->total_dev_cnt)) | |
4343 | vxge_debug_init(VXGE_ERR, | |
4344 | "%s: Configured %d of %d devices", | |
4345 | VXGE_DRIVER_NAME, | |
4346 | driver_config->config_dev_cnt, | |
4347 | driver_config->total_dev_cnt); | |
4348 | driver_config->config_dev_cnt = 0; | |
4349 | driver_config->total_dev_cnt = 0; | |
703da5a1 | 4350 | } |
528f7272 | 4351 | |
9002397e SH |
4352 | /* Make the CPU-based vpath count calculation apply to |
4353 | * individual functions as well. |
4354 | */ | |
4355 | driver_config->g_no_cpus = 0; | |
657205bd SH |
4356 | driver_config->vpath_per_dev = max_config_vpath; |
4357 | ||
703da5a1 RV |
4358 | driver_config->total_dev_cnt++; |
4359 | if (++driver_config->config_dev_cnt > max_config_dev) { | |
4360 | ret = 0; | |
4361 | goto _exit0; | |
4362 | } | |
4363 | ||
4364 | device_config = kzalloc(sizeof(struct vxge_hw_device_config), | |
4365 | GFP_KERNEL); | |
4366 | if (!device_config) { | |
4367 | ret = -ENOMEM; | |
4368 | vxge_debug_init(VXGE_ERR, | |
4369 | "device_config : malloc failed %s %d", | |
4370 | __FILE__, __LINE__); | |
4371 | goto _exit0; | |
4372 | } | |
4373 | ||
528f7272 | 4374 | ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL); |
7dad171c PB |
4375 | if (!ll_config) { |
4376 | ret = -ENOMEM; | |
4377 | vxge_debug_init(VXGE_ERR, | |
528f7272 | 4378 | "device_config : malloc failed %s %d", |
7dad171c PB |
4379 | __FILE__, __LINE__); |
4380 | goto _exit0; | |
4381 | } | |
4382 | ll_config->tx_steering_type = TX_MULTIQ_STEERING; | |
4383 | ll_config->intr_type = MSI_X; | |
4384 | ll_config->napi_weight = NEW_NAPI_WEIGHT; | |
4385 | ll_config->rth_steering = RTH_STEERING; | |
703da5a1 RV |
4386 | |
4387 | /* get the default configuration parameters */ | |
4388 | vxge_hw_device_config_default_get(device_config); | |
4389 | ||
4390 | /* initialize configuration parameters */ | |
7dad171c | 4391 | vxge_device_config_init(device_config, &ll_config->intr_type); |
703da5a1 RV |
4392 | |
4393 | ret = pci_enable_device(pdev); | |
4394 | if (ret) { | |
4395 | vxge_debug_init(VXGE_ERR, | |
4396 | "%s : can not enable PCI device", __func__); | |
4397 | goto _exit0; | |
4398 | } | |
4399 | ||
b3837cec | 4400 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
703da5a1 RV |
4401 | vxge_debug_ll_config(VXGE_TRACE, |
4402 | "%s : using 64bit DMA", __func__); | |
4403 | ||
4404 | high_dma = 1; | |
4405 | ||
4406 | if (pci_set_consistent_dma_mask(pdev, | |
b3837cec | 4407 | DMA_BIT_MASK(64))) { |
703da5a1 RV |
4408 | vxge_debug_init(VXGE_ERR, |
4409 | "%s : unable to obtain 64bit DMA for " | |
4410 | "consistent allocations", __func__); | |
4411 | ret = -ENOMEM; | |
4412 | goto _exit1; | |
4413 | } | |
b3837cec | 4414 | } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { |
703da5a1 RV |
4415 | vxge_debug_ll_config(VXGE_TRACE, |
4416 | "%s : using 32bit DMA", __func__); | |
4417 | } else { | |
4418 | ret = -ENOMEM; | |
4419 | goto _exit1; | |
4420 | } | |
4421 | ||
6cca2003 JM |
4422 | ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME); |
4423 | if (ret) { | |
703da5a1 RV |
4424 | vxge_debug_init(VXGE_ERR, |
4425 | "%s : request regions failed", __func__); | |
703da5a1 RV |
4426 | goto _exit1; |
4427 | } | |
4428 | ||
4429 | pci_set_master(pdev); | |
4430 | ||
4431 | attr.bar0 = pci_ioremap_bar(pdev, 0); | |
4432 | if (!attr.bar0) { | |
4433 | vxge_debug_init(VXGE_ERR, | |
4434 | "%s : cannot remap io memory bar0", __func__); | |
4435 | ret = -ENODEV; | |
4436 | goto _exit2; | |
4437 | } | |
4438 | vxge_debug_ll_config(VXGE_TRACE, | |
4439 | "pci ioremap bar0: %p:0x%llx", | |
4440 | attr.bar0, | |
4441 | (unsigned long long)pci_resource_start(pdev, 0)); | |
4442 | ||
703da5a1 | 4443 | status = vxge_hw_device_hw_info_get(attr.bar0, |
7dad171c | 4444 | &ll_config->device_hw_info); |
703da5a1 RV |
4445 | if (status != VXGE_HW_OK) { |
4446 | vxge_debug_init(VXGE_ERR, | |
4447 | "%s: Reading of hardware info failed." | |
4448 | "Please try upgrading the firmware.", VXGE_DRIVER_NAME); | |
4449 | ret = -EINVAL; | |
7975d1ee | 4450 | goto _exit3; |
703da5a1 RV |
4451 | } |
4452 | ||
7dad171c | 4453 | vpath_mask = ll_config->device_hw_info.vpath_mask; |
703da5a1 RV |
4454 | if (vpath_mask == 0) { |
4455 | vxge_debug_ll_config(VXGE_TRACE, | |
4456 | "%s: No vpaths available in device", VXGE_DRIVER_NAME); | |
4457 | ret = -EINVAL; | |
7975d1ee | 4458 | goto _exit3; |
703da5a1 RV |
4459 | } |
4460 | ||
4461 | vxge_debug_ll_config(VXGE_TRACE, | |
4462 | "%s:%d Vpath mask = %llx", __func__, __LINE__, | |
4463 | (unsigned long long)vpath_mask); | |
4464 | ||
7dad171c PB |
4465 | function_mode = ll_config->device_hw_info.function_mode; |
4466 | host_type = ll_config->device_hw_info.host_type; | |
cb27ec60 | 4467 | is_privileged = __vxge_hw_device_is_privilaged(host_type, |
7dad171c | 4468 | ll_config->device_hw_info.func_id); |
cb27ec60 | 4469 | |
703da5a1 RV |
4470 | /* Check how many vpaths are available */ |
4471 | for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | |
4472 | if (!((vpath_mask) & vxge_mBIT(i))) | |
4473 | continue; | |
4474 | max_vpath_supported++; | |
4475 | } | |
4476 | ||
cb27ec60 SH |
4477 | if (new_device) |
4478 | num_vfs = vxge_get_num_vfs(function_mode) - 1; | |
4479 | ||
5dbc9011 | 4480 | /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ |
c92bf70d JM |
4481 | if (is_sriov(function_mode) && !is_sriov_initialized(pdev) && |
4482 | (ll_config->intr_type != INTA)) { | |
4483 | ret = pci_enable_sriov(pdev, num_vfs); | |
cb27ec60 SH |
4484 | if (ret) |
4485 | vxge_debug_ll_config(VXGE_ERR, | |
4486 | "Failed in enabling SRIOV mode: %d\n", ret); | |
c92bf70d | 4487 | /* No need to fail out, as an error here is non-fatal */ |
5dbc9011 SS |
4488 | } |
4489 | ||
703da5a1 RV |
4490 | /* |
4491 | * Configure vpaths and get driver configured number of vpaths | |
4492 | * which is less than or equal to the maximum vpaths per function. | |
4493 | */ | |
7dad171c | 4494 | no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config); |
703da5a1 RV |
4495 | if (!no_of_vpath) { |
4496 | vxge_debug_ll_config(VXGE_ERR, | |
4497 | "%s: No more vpaths to configure", VXGE_DRIVER_NAME); | |
4498 | ret = 0; | |
7975d1ee | 4499 | goto _exit3; |
703da5a1 RV |
4500 | } |
4501 | ||
4502 | /* Setting driver callbacks */ | |
956a2066 | 4503 | attr.uld_callbacks = &vxge_callbacks; |
703da5a1 RV |
4504 | |
4505 | status = vxge_hw_device_initialize(&hldev, &attr, device_config); | |
4506 | if (status != VXGE_HW_OK) { | |
4507 | vxge_debug_init(VXGE_ERR, | |
4508 | "Failed to initialize device (%d)", status); | |
4509 | ret = -EINVAL; | |
7975d1ee | 4510 | goto _exit3; |
703da5a1 RV |
4511 | } |
4512 | ||
e8ac1756 JM |
4513 | if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major, |
4514 | ll_config->device_hw_info.fw_version.minor, | |
4515 | ll_config->device_hw_info.fw_version.build) >= | |
4516 | VXGE_EPROM_FW_VER) { | |
4517 | struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES]; | |
4518 | ||
4519 | status = vxge_hw_vpath_eprom_img_ver_get(hldev, img); | |
4520 | if (status != VXGE_HW_OK) { | |
4521 | vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed", | |
4522 | VXGE_DRIVER_NAME); | |
4523 | /* This is a non-fatal error, continue */ | |
4524 | } | |
4525 | ||
4526 | for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) { | |
4527 | hldev->eprom_versions[i] = img[i].version; | |
4528 | if (!img[i].is_valid) | |
4529 | break; | |
4530 | vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version " | |
1d15f81c | 4531 | "%d.%d.%d.%d", VXGE_DRIVER_NAME, i, |
e8ac1756 JM |
4532 | VXGE_EPROM_IMG_MAJOR(img[i].version), |
4533 | VXGE_EPROM_IMG_MINOR(img[i].version), | |
4534 | VXGE_EPROM_IMG_FIX(img[i].version), | |
4535 | VXGE_EPROM_IMG_BUILD(img[i].version)); | |
4536 | } | |
4537 | } | |
4538 | ||
fa41fd10 | 4539 | /* If FCS stripping is not disabled in the MAC, fail the driver load */ |
b81b3733 JM |
4540 | status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask); |
4541 | if (status != VXGE_HW_OK) { | |
4542 | vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC" | |
4543 | " failing driver load", VXGE_DRIVER_NAME); | |
fa41fd10 SH |
4544 | ret = -EINVAL; |
4545 | goto _exit4; | |
4546 | } | |
4547 | ||
cd883a79 JM |
4548 | /* Always enable HWTS. This will always cause the FCS to be invalid, |
4549 | * because HWTS uses the FCS as the location of the timestamp. The HW | |
4550 | * FCS checking will still correctly determine if there is a valid | |
4551 | * checksum, and the FCS is being removed by the driver anyway, so no | |
4552 | * functionality is lost. Since it is always enabled, we now simply | |
4553 | * use the ioctl call to set whether or not the driver should pay | |
4554 | * attention to the HWTS. | |
4555 | */ | |
4556 | if (is_privileged == VXGE_HW_OK) { | |
4557 | status = vxge_timestamp_config(hldev); | |
4558 | if (status != VXGE_HW_OK) { | |
4559 | vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed", | |
4560 | VXGE_DRIVER_NAME); | |
4561 | ret = -EFAULT; | |
4562 | goto _exit4; | |
4563 | } | |
4564 | } | |
4565 | ||
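/*
 * Illustrative sketch only (not the driver's code; vdev->rx_hwts and the
 * helper name are assumptions): with HWTS permanently enabled in hardware,
 * a SIOCSHWTSTAMP handler reduces to recording whether received HW
 * timestamps should be honoured:
 *
 *	static int hwts_ioctl_sketch(struct vxgedev *vdev, struct ifreq *rq)
 *	{
 *		struct hwtstamp_config cfg;
 *
 *		if (copy_from_user(&cfg, rq->ifr_data, sizeof(cfg)))
 *			return -EFAULT;
 *		switch (cfg.rx_filter) {
 *		case HWTSTAMP_FILTER_NONE:
 *			vdev->rx_hwts = 0;	// ignore HW timestamps
 *			break;
 *		case HWTSTAMP_FILTER_ALL:
 *			vdev->rx_hwts = 1;	// report HW timestamps
 *			break;
 *		default:
 *			return -ERANGE;
 *		}
 *		return copy_to_user(rq->ifr_data, &cfg, sizeof(cfg)) ?
 *			-EFAULT : 0;
 *	}
 */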
703da5a1 RV |
4566 | vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); |
4567 | ||
4568 | /* set private device info */ | |
4569 | pci_set_drvdata(pdev, hldev); | |
4570 | ||
7dad171c PB |
4571 | ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; |
4572 | ll_config->addr_learn_en = addr_learn_en; | |
4573 | ll_config->rth_algorithm = RTH_ALG_JENKINS; | |
47f01db4 JM |
4574 | ll_config->rth_hash_type_tcpipv4 = 1; |
4575 | ll_config->rth_hash_type_ipv4 = 0; | |
4576 | ll_config->rth_hash_type_tcpipv6 = 0; | |
4577 | ll_config->rth_hash_type_ipv6 = 0; | |
4578 | ll_config->rth_hash_type_tcpipv6ex = 0; | |
4579 | ll_config->rth_hash_type_ipv6ex = 0; | |
7dad171c PB |
4580 | ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; |
4581 | ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; | |
4582 | ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; | |
4583 | ||
e8ac1756 JM |
4584 | ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, |
4585 | &vdev); | |
4586 | if (ret) { | |
703da5a1 | 4587 | ret = -EINVAL; |
7975d1ee | 4588 | goto _exit4; |
703da5a1 RV |
4589 | } |
4590 | ||
e8ac1756 JM |
4591 | ret = vxge_probe_fw_update(vdev); |
4592 | if (ret) | |
4593 | goto _exit5; | |
4594 | ||
703da5a1 RV |
4595 | vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); |
4596 | VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), | |
4597 | vxge_hw_device_trace_level_get(hldev)); | |
4598 | ||
4599 | /* set private HW device info */ | |
703da5a1 RV |
4600 | vdev->mtu = VXGE_HW_DEFAULT_MTU; |
4601 | vdev->bar0 = attr.bar0; | |
703da5a1 RV |
4602 | vdev->max_vpath_supported = max_vpath_supported; |
4603 | vdev->no_of_vpath = no_of_vpath; | |
4604 | ||
4605 | /* Virtual Path count */ | |
4606 | for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { | |
4607 | if (!vxge_bVALn(vpath_mask, i, 1)) | |
4608 | continue; | |
4609 | if (j >= vdev->no_of_vpath) | |
4610 | break; | |
4611 | ||
4612 | vdev->vpaths[j].is_configured = 1; | |
4613 | vdev->vpaths[j].device_id = i; | |
703da5a1 RV |
4614 | vdev->vpaths[j].ring.driver_id = j; |
4615 | vdev->vpaths[j].vdev = vdev; | |
4616 | vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath; | |
4617 | memcpy((u8 *)vdev->vpaths[j].macaddr, | |
7dad171c | 4618 | ll_config->device_hw_info.mac_addrs[i], |
703da5a1 RV |
4619 | ETH_ALEN); |
4620 | ||
4621 | /* Initialize the mac address list header */ | |
4622 | INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list); | |
4623 | ||
4624 | vdev->vpaths[j].mac_addr_cnt = 0; | |
4625 | vdev->vpaths[j].mcast_addr_cnt = 0; | |
4626 | j++; | |
4627 | } | |
4628 | vdev->exec_mode = VXGE_EXEC_MODE_DISABLE; | |
4629 | vdev->max_config_port = max_config_port; | |
4630 | ||
4631 | vdev->vlan_tag_strip = vlan_tag_strip; | |
4632 | ||
4633 | /* map the hashing selector table to the configured vpaths */ | |
4634 | for (i = 0; i < vdev->no_of_vpath; i++) | |
4635 | vdev->vpath_selector[i] = vpath_selector[i]; | |
4636 | ||
4637 | macaddr = (u8 *)vdev->vpaths[0].macaddr; | |
4638 | ||
7dad171c PB |
4639 | ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0'; |
4640 | ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0'; | |
4641 | ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0'; | |
703da5a1 RV |
4642 | |
4643 | vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s", | |
7dad171c | 4644 | vdev->ndev->name, ll_config->device_hw_info.serial_number); |
703da5a1 RV |
4645 | |
4646 | vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s", | |
7dad171c | 4647 | vdev->ndev->name, ll_config->device_hw_info.part_number); |
703da5a1 RV |
4648 | |
4649 | vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", | |
7dad171c | 4650 | vdev->ndev->name, ll_config->device_hw_info.product_desc); |
703da5a1 | 4651 | |
bf54e736 | 4652 | vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM", |
4653 | vdev->ndev->name, macaddr); | |
703da5a1 RV |
4654 | |
4655 | vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d", | |
4656 | vdev->ndev->name, vxge_hw_device_link_width_get(hldev)); | |
4657 | ||
4658 | vxge_debug_init(VXGE_TRACE, | |
4659 | "%s: Firmware version : %s Date : %s", vdev->ndev->name, | |
7dad171c PB |
4660 | ll_config->device_hw_info.fw_version.version, |
4661 | ll_config->device_hw_info.fw_date.date); | |
703da5a1 | 4662 | |
0a25bdc6 | 4663 | if (new_device) { |
7dad171c | 4664 | switch (ll_config->device_hw_info.function_mode) { |
0a25bdc6 SH |
4665 | case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: |
4666 | vxge_debug_init(VXGE_TRACE, | |
4667 | "%s: Single Function Mode Enabled", vdev->ndev->name); | |
4668 | break; | |
4669 | case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION: | |
4670 | vxge_debug_init(VXGE_TRACE, | |
4671 | "%s: Multi Function Mode Enabled", vdev->ndev->name); | |
4672 | break; | |
4673 | case VXGE_HW_FUNCTION_MODE_SRIOV: | |
4674 | vxge_debug_init(VXGE_TRACE, | |
4675 | "%s: Single Root IOV Mode Enabled", vdev->ndev->name); | |
4676 | break; | |
4677 | case VXGE_HW_FUNCTION_MODE_MRIOV: | |
4678 | vxge_debug_init(VXGE_TRACE, | |
4679 | "%s: Multi Root IOV Mode Enabled", vdev->ndev->name); | |
4680 | break; | |
4681 | } | |
4682 | } | |
4683 | ||
703da5a1 RV |
4684 | vxge_print_parm(vdev, vpath_mask); |
4685 | ||
4686 | /* Store the fw version for ethtool option */ |
7dad171c | 4687 | strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version); |
703da5a1 RV |
4688 | memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN); |
4689 | memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN); | |
4690 | ||
4691 | /* Copy the station mac address to the list */ | |
4692 | for (i = 0; i < vdev->no_of_vpath; i++) { | |
e80be0b0 | 4693 | entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL); |
703da5a1 RV |
4694 | if (!entry) { | |
4695 | vxge_debug_init(VXGE_ERR, | |
4696 | "%s: mac_addr_list: memory allocation failed", | |
4697 | vdev->ndev->name); | |
4698 | ret = -ENOMEM; | |
e8ac1756 | 4699 | goto _exit6; |
703da5a1 RV |
4700 | } |
4701 | macaddr = (u8 *)&entry->macaddr; | |
4702 | memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); | |
4703 | list_add(&entry->item, &vdev->vpaths[i].mac_addr_list); | |
4704 | vdev->vpaths[i].mac_addr_cnt = 1; | |
4705 | } | |
4706 | ||
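/*
 * Hedged sketch of the matching teardown (the driver's real
 * vxge_free_mac_add_list(), used in the error path below, is defined
 * elsewhere in this file): walk each vpath's list and free the entries.
 *
 *	struct list_head *p, *n;
 *
 *	list_for_each_safe(p, n, &vpath->mac_addr_list)
 *		kfree(list_entry(p, struct vxge_mac_addrs, item));
 */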
914d0d71 | 4707 | kfree(device_config); |
eb5f10c2 SH |
4708 | |
4709 | /* | |
4710 | * INTA is shared in multi-function mode. This is unlike the INTA | |
4711 | * implementation in MR mode, where each VH has its own INTA message. | |
4712 | * - INTA is masked (disabled) as long as at least one function sets | |
4713 | * its TITAN_MASK_ALL_INT.ALARM bit. | |
4714 | * - INTA is unmasked (enabled) when all enabled functions have cleared | |
4715 | * their own TITAN_MASK_ALL_INT.ALARM bit. | |
4716 | * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up. | |
4717 | * Though this driver leaves the top-level interrupts unmasked while | |
4718 | * leaving the required module interrupt bits masked on exit, there | |
4719 | * could be a rogue driver around that does not follow this procedure, | |
4720 | * resulting in a failure to generate interrupts. The following code is | |
4721 | * present to prevent such a failure. | |
4722 | */ | |
4723 | ||
7dad171c | 4724 | if (ll_config->device_hw_info.function_mode == |
eb5f10c2 SH |
4725 | VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) |
4726 | if (vdev->config.intr_type == INTA) | |
4727 | vxge_hw_device_unmask_all(hldev); | |
4728 | ||
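/*
 * Illustrative aside (simplified model of the rule above, with a
 * hypothetical for_each_enabled_function() iterator): INTA delivery in
 * multi-function mode behaves like the AND of every function's "alarm
 * cleared" state,
 *
 *	inta_enabled = true;
 *	for_each_enabled_function(fn)
 *		inta_enabled &= !fn->titan_mask_all_int_alarm;
 *
 * so one function that leaves its ALARM bit set keeps INTA masked for
 * all of them, which is why the unmask above is done defensively.
 */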
703da5a1 RV |
4729 | vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", |
4730 | vdev->ndev->name, __func__, __LINE__); | |
4731 | ||
4732 | vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); | |
4733 | VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), | |
4734 | vxge_hw_device_trace_level_get(hldev)); | |
4735 | ||
7dad171c | 4736 | kfree(ll_config); |
703da5a1 RV |
4737 | return 0; |
4738 | ||
e8ac1756 | 4739 | _exit6: |
703da5a1 RV |
4740 | for (i = 0; i < vdev->no_of_vpath; i++) |
4741 | vxge_free_mac_add_list(&vdev->vpaths[i]); | |
e8ac1756 | 4742 | _exit5: |
703da5a1 | 4743 | vxge_device_unregister(hldev); |
7975d1ee | 4744 | _exit4: |
6cca2003 | 4745 | pci_set_drvdata(pdev, NULL); |
703da5a1 | 4746 | vxge_hw_device_terminate(hldev); |
6cca2003 | 4747 | pci_disable_sriov(pdev); |
703da5a1 RV |
4748 | _exit3: |
4749 | iounmap(attr.bar0); | |
4750 | _exit2: | |
dc66daa9 | 4751 | pci_release_region(pdev, 0); |
703da5a1 RV |
4752 | _exit1: |
4753 | pci_disable_device(pdev); | |
4754 | _exit0: | |
7dad171c | 4755 | kfree(ll_config); |
703da5a1 RV |
4756 | kfree(device_config); |
4757 | driver_config->config_dev_cnt--; | |
6cca2003 | 4758 | driver_config->total_dev_cnt--; |
703da5a1 RV |
4759 | return ret; |
4760 | } | |
4761 | ||
4762 | /** | |
4763 | * vxge_remove - Free the PCI device | |
4764 | * @pdev: structure containing the PCI related information of the device. | |
4765 | * Description: This function is called by the PCI subsystem to release a | |
4766 | * PCI device and free all resources held by the device. | |
4767 | */ | |
2c91308f | 4768 | static void __devexit vxge_remove(struct pci_dev *pdev) |
703da5a1 | 4769 | { |
2c91308f | 4770 | struct __vxge_hw_device *hldev; |
6cca2003 JM |
4771 | struct vxgedev *vdev; |
4772 | int i; | |
703da5a1 | 4773 | |
d8ee7071 | 4774 | hldev = pci_get_drvdata(pdev); |
703da5a1 RV |
4775 | if (hldev == NULL) |
4776 | return; | |
2c91308f | 4777 | |
6cca2003 | 4778 | vdev = netdev_priv(hldev->ndev); |
703da5a1 | 4779 | |
2c91308f | 4780 | vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__); |
2c91308f JM |
4781 | vxge_debug_init(vdev->level_trace, "%s : removing PCI device...", |
4782 | __func__); | |
703da5a1 | 4783 | |
6cca2003 | 4784 | for (i = 0; i < vdev->no_of_vpath; i++) |
703da5a1 | 4785 | vxge_free_mac_add_list(&vdev->vpaths[i]); |
703da5a1 | 4786 | |
6cca2003 JM |
4787 | vxge_device_unregister(hldev); |
4788 | pci_set_drvdata(pdev, NULL); | |
4789 | /* Do not call pci_disable_sriov here, as it will break child devices */ | |
4790 | vxge_hw_device_terminate(hldev); | |
703da5a1 | 4791 | iounmap(vdev->bar0); |
6cca2003 JM |
4792 | pci_release_region(pdev, 0); |
4793 | pci_disable_device(pdev); | |
4794 | driver_config->config_dev_cnt--; | |
4795 | driver_config->total_dev_cnt--; | |
703da5a1 | 4796 | |
2c91308f JM |
4797 | vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered", |
4798 | __func__, __LINE__); | |
2c91308f JM |
4799 | vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__, |
4800 | __LINE__); | |
703da5a1 RV |
4801 | } |
4802 | ||
4803 | static struct pci_error_handlers vxge_err_handler = { | |
4804 | .error_detected = vxge_io_error_detected, | |
4805 | .slot_reset = vxge_io_slot_reset, | |
4806 | .resume = vxge_io_resume, | |
4807 | }; | |
4808 | ||
4809 | static struct pci_driver vxge_driver = { | |
4810 | .name = VXGE_DRIVER_NAME, | |
4811 | .id_table = vxge_id_table, | |
4812 | .probe = vxge_probe, | |
4813 | .remove = __devexit_p(vxge_remove), | |
4814 | #ifdef CONFIG_PM | |
4815 | .suspend = vxge_pm_suspend, | |
4816 | .resume = vxge_pm_resume, | |
4817 | #endif | |
4818 | .err_handler = &vxge_err_handler, | |
4819 | }; | |
4820 | ||
4821 | static int __init | |
4822 | vxge_starter(void) | |
4823 | { | |
4824 | int ret = 0; | |
703da5a1 | 4825 | |
75f5e1c6 JP |
4826 | pr_info("Copyright(c) 2002-2010 Exar Corp.\n"); |
4827 | pr_info("Driver version: %s\n", DRV_VERSION); | |
703da5a1 RV |
4828 | |
4829 | verify_bandwidth(); | |
4830 | ||
4831 | driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL); | |
4832 | if (!driver_config) | |
4833 | return -ENOMEM; | |
4834 | ||
4835 | ret = pci_register_driver(&vxge_driver); | |
528f7272 JM |
4836 | if (ret) { |
4837 | kfree(driver_config); | |
4838 | goto err; | |
4839 | } | |
703da5a1 RV |
4840 | |
4841 | if (driver_config->config_dev_cnt && | |
4842 | (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) | |
4843 | vxge_debug_init(VXGE_ERR, | |
4844 | "%s: Configured %d of %d devices", | |
4845 | VXGE_DRIVER_NAME, driver_config->config_dev_cnt, | |
4846 | driver_config->total_dev_cnt); | |
528f7272 | 4847 | err: |
703da5a1 RV |
4848 | return ret; |
4849 | } | |
4850 | ||
4851 | static void __exit | |
4852 | vxge_closer(void) | |
4853 | { | |
4854 | pci_unregister_driver(&vxge_driver); | |
4855 | kfree(driver_config); | |
4856 | } | |
4857 | module_init(vxge_starter); | |
4858 | module_exit(vxge_closer); |
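/*
 * Usage note (illustrative, assuming the module is built as vxge.ko):
 * the parameters consumed above, e.g. vlan_tag_strip and addr_learn_en,
 * can be supplied at load time:
 *
 *	modprobe vxge vlan_tag_strip=1 addr_learn_en=0
 */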