1 /******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14 #include <linux/vmalloc.h>
15 #include <linux/etherdevice.h>
16 #include <linux/pci.h>
17 #include <linux/pci_hotplug.h>
18 #include <linux/slab.h>
19
20 #include "vxge-traffic.h"
21 #include "vxge-config.h"
22 #include "vxge-main.h"
23
24 #define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
25 status = __vxge_hw_vpath_stats_access(vpath, \
26 VXGE_HW_STATS_OP_READ, \
27 offset, \
28 &val64); \
29 if (status != VXGE_HW_OK) \
30 return status; \
31 }
32
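/* Illustrative usage sketch (not part of the driver): the macro above
 * expects `vpath', `status' and `val64' to already be in scope at the
 * call site, e.g.:
 *
 *	u64 val64;
 *	enum vxge_hw_status status;
 *
 *	VXGE_HW_VPATH_STATS_PIO_READ(
 *		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
 *
 * After the call val64 holds the counter; any failure returns early.
 */
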
33 static void
34 vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
35 {
36 u64 val64;
37
38 val64 = readq(&vp_reg->rxmac_vcfg0);
39 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
40 writeq(val64, &vp_reg->rxmac_vcfg0);
41 val64 = readq(&vp_reg->rxmac_vcfg0);
42 }
43
44 /*
45 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
46 */
47 int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
48 {
49 struct vxge_hw_vpath_reg __iomem *vp_reg;
50 struct __vxge_hw_virtualpath *vpath;
51 u64 val64, rxd_count, rxd_spat;
52 int count = 0, total_count = 0;
53
54 vpath = &hldev->virtual_paths[vp_id];
55 vp_reg = vpath->vp_reg;
56
57 vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
58
59 /* Check that the ring controller for this vpath has enough free RxDs
60 * to send frames to the host. This is done by reading the
61 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
62 * RXD_SPAT value for the vpath.
63 */
64 val64 = readq(&vp_reg->prc_cfg6);
65 rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
66 	/* Use a factor of 2 when comparing rxd_count against rxd_spat to allow
67 	 * some leeway.
68 	 */
69 rxd_spat *= 2;
70
71 do {
72 mdelay(1);
73
74 rxd_count = readq(&vp_reg->prc_rxd_doorbell);
75
76 /* Check that the ring controller for this vpath does
77 * not have any frame in its pipeline.
78 */
79 val64 = readq(&vp_reg->frm_in_progress_cnt);
80 if ((rxd_count <= rxd_spat) || (val64 > 0))
81 count = 0;
82 else
83 count++;
84 total_count++;
85 } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
86 (total_count < VXGE_HW_MAX_POLLING_COUNT));
87
88 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
89 		printk(KERN_ALERT "%s: still receiving traffic, aborting wait\n",
90 __func__);
91
92 return total_count;
93 }
94
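/* Worked example (hypothetical numbers): if PRC_CFG6 reports an
 * RXD_SPAT of 63, rxd_spat becomes (63 + 1) * 2 = 128, so the vpath is
 * only treated as idle once prc_rxd_doorbell stays above 128 and
 * frm_in_progress_cnt stays at zero for
 * VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT consecutive 1 ms samples.
 */
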
95 /* vxge_hw_device_wait_receive_idle - This function waits until all frames
96 * stored in the frame buffer for each vpath assigned to the given
97 * function (hldev) have been sent to the host.
98 */
99 void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
100 {
101 int i, total_count = 0;
102
103 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
104 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
105 continue;
106
107 total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
108 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
109 break;
110 }
111 }
112
113 /*
114 * __vxge_hw_device_register_poll
115  *         Poll the given register for up to the specified amount of time,
116  *         until the masked bits are cleared.
117 */
118 static enum vxge_hw_status
119 __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
120 {
121 u64 val64;
122 u32 i = 0;
123 enum vxge_hw_status ret = VXGE_HW_FAIL;
124
125 udelay(10);
126
127 do {
128 val64 = readq(reg);
129 if (!(val64 & mask))
130 return VXGE_HW_OK;
131 udelay(100);
132 } while (++i <= 9);
133
134 i = 0;
135 do {
136 val64 = readq(reg);
137 if (!(val64 & mask))
138 return VXGE_HW_OK;
139 mdelay(1);
140 } while (++i <= max_millis);
141
142 return ret;
143 }
144
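/* Note: the poll above first busy-waits about 1 ms in 100 us steps
 * (cheap for operations that complete quickly) and then falls back to
 * 1 ms mdelay steps, so the worst-case wait is roughly max_millis
 * milliseconds plus the initial millisecond.
 */
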
145 static inline enum vxge_hw_status
146 __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
147 u64 mask, u32 max_millis)
148 {
149 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
150 wmb();
151 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
152 wmb();
153
154 return __vxge_hw_device_register_poll(addr, mask, max_millis);
155 }
156
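/* Illustrative sketch (hypothetical register/bit names): a strobed
 * command is issued by writing the command word with its strobe bit set
 * and polling for the strobe to clear:
 *
 *	status = __vxge_hw_pio_mem_write64(cmd_val | CMD_STROBE,
 *					   &vp_reg->cmd_reg,
 *					   CMD_STROBE,
 *					   VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 *
 * The lower 32 bits are posted first and the upper 32 bits (which carry
 * the strobe for the steering commands used here) second, so the device
 * only acts once the full 64-bit value has landed.
 */
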
157 static enum vxge_hw_status
158 vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
159 u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
160 u64 *steer_ctrl)
161 {
162 struct vxge_hw_vpath_reg __iomem *vp_reg;
163 enum vxge_hw_status status;
164 u64 val64;
165 u32 retry = 0, max_retry = 100;
166
167 vp_reg = vpath->vp_reg;
168
169 if (vpath->vp_open) {
170 max_retry = 3;
171 spin_lock(&vpath->lock);
172 }
173
174 writeq(*data0, &vp_reg->rts_access_steer_data0);
175 writeq(*data1, &vp_reg->rts_access_steer_data1);
176 wmb();
177
178 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
179 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
180 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
181 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
182 *steer_ctrl;
183
184 status = __vxge_hw_pio_mem_write64(val64,
185 &vp_reg->rts_access_steer_ctrl,
186 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
187 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
188
189 	/* The __vxge_hw_device_register_poll can udelay for a significant
190 	 * amount of time, blocking other processes from the CPU. If it delays
191 	 * for ~5 secs, an NMI error can occur. A way around this is to give up
192 	 * the processor via msleep, but sleeping is not allowed while a lock
193 	 * is held. So, retry only a few times without sleeping while the vpath
194 	 * is open (lock held); otherwise sleep for 20 ms between polls until
195 	 * the firmware operation has completed or timed out.
196 	 */
197 while ((status != VXGE_HW_OK) && retry++ < max_retry) {
198 if (!vpath->vp_open)
199 msleep(20);
200 status = __vxge_hw_device_register_poll(
201 &vp_reg->rts_access_steer_ctrl,
202 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
203 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
204 }
205
206 if (status != VXGE_HW_OK)
207 goto out;
208
209 val64 = readq(&vp_reg->rts_access_steer_ctrl);
210 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
211 *data0 = readq(&vp_reg->rts_access_steer_data0);
212 *data1 = readq(&vp_reg->rts_access_steer_data1);
213 *steer_ctrl = val64;
214 } else
215 status = VXGE_HW_FAIL;
216
217 out:
218 if (vpath->vp_open)
219 spin_unlock(&vpath->lock);
220 return status;
221 }
222
223 enum vxge_hw_status
224 vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
225 u32 *minor, u32 *build)
226 {
227 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
228 struct __vxge_hw_virtualpath *vpath;
229 enum vxge_hw_status status;
230
231 vpath = &hldev->virtual_paths[hldev->first_vp_id];
232
233 status = vxge_hw_vpath_fw_api(vpath,
234 VXGE_HW_FW_UPGRADE_ACTION,
235 VXGE_HW_FW_UPGRADE_MEMO,
236 VXGE_HW_FW_UPGRADE_OFFSET_READ,
237 &data0, &data1, &steer_ctrl);
238 if (status != VXGE_HW_OK)
239 return status;
240
241 *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
242 *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
243 *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
244
245 return status;
246 }
247
248 enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
249 {
250 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
251 struct __vxge_hw_virtualpath *vpath;
252 enum vxge_hw_status status;
253 u32 ret;
254
255 vpath = &hldev->virtual_paths[hldev->first_vp_id];
256
257 status = vxge_hw_vpath_fw_api(vpath,
258 VXGE_HW_FW_UPGRADE_ACTION,
259 VXGE_HW_FW_UPGRADE_MEMO,
260 VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
261 &data0, &data1, &steer_ctrl);
262 if (status != VXGE_HW_OK) {
263 vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
264 goto exit;
265 }
266
267 ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
268 if (ret != 1) {
269 vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
270 __func__, ret);
271 status = VXGE_HW_FAIL;
272 }
273
274 exit:
275 return status;
276 }
277
278 enum vxge_hw_status
279 vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
280 {
281 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
282 struct __vxge_hw_virtualpath *vpath;
283 enum vxge_hw_status status;
284 int ret_code, sec_code;
285
286 vpath = &hldev->virtual_paths[hldev->first_vp_id];
287
288 /* send upgrade start command */
289 status = vxge_hw_vpath_fw_api(vpath,
290 VXGE_HW_FW_UPGRADE_ACTION,
291 VXGE_HW_FW_UPGRADE_MEMO,
292 VXGE_HW_FW_UPGRADE_OFFSET_START,
293 &data0, &data1, &steer_ctrl);
294 if (status != VXGE_HW_OK) {
295 vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
296 __func__);
297 return status;
298 }
299
300 /* Transfer fw image to adapter 16 bytes at a time */
301 for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
302 steer_ctrl = 0;
303
304 		/* The next 128 bits of fwdata to be loaded onto the adapter */
305 data0 = *((u64 *)fwdata);
306 data1 = *((u64 *)fwdata + 1);
307
308 status = vxge_hw_vpath_fw_api(vpath,
309 VXGE_HW_FW_UPGRADE_ACTION,
310 VXGE_HW_FW_UPGRADE_MEMO,
311 VXGE_HW_FW_UPGRADE_OFFSET_SEND,
312 &data0, &data1, &steer_ctrl);
313 if (status != VXGE_HW_OK) {
314 vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
315 __func__);
316 goto out;
317 }
318
319 ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
320 switch (ret_code) {
321 case VXGE_HW_FW_UPGRADE_OK:
322 /* All OK, send next 16 bytes. */
323 break;
324 case VXGE_FW_UPGRADE_BYTES2SKIP:
325 /* skip bytes in the stream */
326 fwdata += (data0 >> 8) & 0xFFFFFFFF;
327 break;
328 case VXGE_HW_FW_UPGRADE_DONE:
329 goto out;
330 case VXGE_HW_FW_UPGRADE_ERR:
331 sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
332 switch (sec_code) {
333 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
334 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
335 printk(KERN_ERR
336 "corrupted data from .ncf file\n");
337 break;
338 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
339 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
340 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
341 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
342 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
343 printk(KERN_ERR "invalid .ncf file\n");
344 break;
345 case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
346 printk(KERN_ERR "buffer overflow\n");
347 break;
348 case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
349 printk(KERN_ERR "failed to flash the image\n");
350 break;
351 case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
352 printk(KERN_ERR
353 "generic error. Unknown error type\n");
354 break;
355 default:
356 printk(KERN_ERR "Unknown error of type %d\n",
357 sec_code);
358 break;
359 }
360 status = VXGE_HW_FAIL;
361 goto out;
362 default:
363 printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
364 status = VXGE_HW_FAIL;
365 goto out;
366 }
367 /* point to next 16 bytes */
368 fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
369 }
370 out:
371 return status;
372 }
373
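/* Illustrative caller sketch (assumption; the firmware file name is
 * made up): fwdata normally comes from the firmware loader, e.g.:
 *
 *	const struct firmware *fw;
 *
 *	if (request_firmware(&fw, "vxge/X3fw.ncf", &pdev->dev) == 0) {
 *		status = vxge_update_fw_image(hldev, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 */
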
374 enum vxge_hw_status
375 vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
376 struct eprom_image *img)
377 {
378 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
379 struct __vxge_hw_virtualpath *vpath;
380 enum vxge_hw_status status;
381 int i;
382
383 vpath = &hldev->virtual_paths[hldev->first_vp_id];
384
385 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
386 data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
387 data1 = steer_ctrl = 0;
388
389 status = vxge_hw_vpath_fw_api(vpath,
390 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
391 VXGE_HW_FW_API_GET_EPROM_REV,
392 0, &data0, &data1, &steer_ctrl);
393 if (status != VXGE_HW_OK)
394 break;
395
396 img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
397 img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
398 img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
399 img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
400 }
401
402 return status;
403 }
404
405 /*
406 * __vxge_hw_channel_free - Free memory allocated for channel
407 * This function deallocates memory from the channel and various arrays
408 * in the channel
409 */
410 static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
411 {
412 kfree(channel->work_arr);
413 kfree(channel->free_arr);
414 kfree(channel->reserve_arr);
415 kfree(channel->orig_arr);
416 kfree(channel);
417 }
418
419 /*
420 * __vxge_hw_channel_initialize - Initialize a channel
421 * This function initializes a channel by properly setting the
422 * various references
423 */
424 static enum vxge_hw_status
425 __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
426 {
427 u32 i;
428 struct __vxge_hw_virtualpath *vpath;
429
430 vpath = channel->vph->vpath;
431
432 if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
433 for (i = 0; i < channel->length; i++)
434 channel->orig_arr[i] = channel->reserve_arr[i];
435 }
436
437 switch (channel->type) {
438 case VXGE_HW_CHANNEL_TYPE_FIFO:
439 vpath->fifoh = (struct __vxge_hw_fifo *)channel;
440 channel->stats = &((struct __vxge_hw_fifo *)
441 channel)->stats->common_stats;
442 break;
443 case VXGE_HW_CHANNEL_TYPE_RING:
444 vpath->ringh = (struct __vxge_hw_ring *)channel;
445 channel->stats = &((struct __vxge_hw_ring *)
446 channel)->stats->common_stats;
447 break;
448 default:
449 break;
450 }
451
452 return VXGE_HW_OK;
453 }
454
455 /*
456 * __vxge_hw_channel_reset - Resets a channel
457 * This function resets a channel by properly setting the various references
458 */
459 static enum vxge_hw_status
460 __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
461 {
462 u32 i;
463
464 for (i = 0; i < channel->length; i++) {
465 if (channel->reserve_arr != NULL)
466 channel->reserve_arr[i] = channel->orig_arr[i];
467 if (channel->free_arr != NULL)
468 channel->free_arr[i] = NULL;
469 if (channel->work_arr != NULL)
470 channel->work_arr[i] = NULL;
471 }
472 channel->free_ptr = channel->length;
473 channel->reserve_ptr = channel->length;
474 channel->reserve_top = 0;
475 channel->post_index = 0;
476 channel->compl_index = 0;
477
478 return VXGE_HW_OK;
479 }
480
481 /*
482 * __vxge_hw_device_pci_e_init
483 * Initialize certain PCI/PCI-X configuration registers
484 * with recommended values. Save config space for future hw resets.
485 */
486 static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
487 {
488 u16 cmd = 0;
489
490 	/* Set the Parity Error Response bit and SERR in the PCI command register. */
491 pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
492 	cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;	/* 0x140 */
493 pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
494
495 pci_save_state(hldev->pdev);
496 }
497
498 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
499 * in progress
500  * This routine checks that the vpath reset-in-progress register has cleared
500  * to zero
501 */
502 static enum vxge_hw_status
503 __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
504 {
505 enum vxge_hw_status status;
506 status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
507 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
508 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
509 return status;
510 }
511
512 /*
513  * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
514  * Set the swapper bits appropriately for the legacy section.
515 */
516 static enum vxge_hw_status
517 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
518 {
519 u64 val64;
520 enum vxge_hw_status status = VXGE_HW_OK;
521
522 val64 = readq(&legacy_reg->toc_swapper_fb);
523
524 wmb();
525
526 switch (val64) {
527 case VXGE_HW_SWAPPER_INITIAL_VALUE:
528 return status;
529
530 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
531 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
532 &legacy_reg->pifm_rd_swap_en);
533 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
534 &legacy_reg->pifm_rd_flip_en);
535 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
536 &legacy_reg->pifm_wr_swap_en);
537 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
538 &legacy_reg->pifm_wr_flip_en);
539 break;
540
541 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
542 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
543 &legacy_reg->pifm_rd_swap_en);
544 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
545 &legacy_reg->pifm_wr_swap_en);
546 break;
547
548 case VXGE_HW_SWAPPER_BIT_FLIPPED:
549 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
550 &legacy_reg->pifm_rd_flip_en);
551 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
552 &legacy_reg->pifm_wr_flip_en);
553 break;
554 }
555
556 wmb();
557
558 val64 = readq(&legacy_reg->toc_swapper_fb);
559
560 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
561 status = VXGE_HW_ERR_SWAPPER_CTRL;
562
563 return status;
564 }
565
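/* Note: VXGE_HW_SWAPPER_INITIAL_VALUE is a known 64-bit pattern exposed
 * by the hardware; comparing the raw readq() result against its
 * byte-swapped and/or bit-flipped variants tells which PIO
 * transformations this host needs, and the final re-read verifies that
 * the pattern now comes back unmodified.
 */
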
566 /*
567 * __vxge_hw_device_toc_get
568 * This routine sets the swapper and reads the toc pointer and returns the
569 * memory mapped address of the toc
570 */
571 static struct vxge_hw_toc_reg __iomem *
572 __vxge_hw_device_toc_get(void __iomem *bar0)
573 {
574 u64 val64;
575 struct vxge_hw_toc_reg __iomem *toc = NULL;
576 enum vxge_hw_status status;
577
578 struct vxge_hw_legacy_reg __iomem *legacy_reg =
579 (struct vxge_hw_legacy_reg __iomem *)bar0;
580
581 status = __vxge_hw_legacy_swapper_set(legacy_reg);
582 if (status != VXGE_HW_OK)
583 goto exit;
584
585 val64 = readq(&legacy_reg->toc_first_pointer);
586 toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
587 exit:
588 return toc;
589 }
590
591 /*
592 * __vxge_hw_device_reg_addr_get
593 * This routine sets the swapper and reads the toc pointer and initializes the
594  * register location pointers in the device object. It waits until the RIC
595  * has completed initializing the registers.
596 */
597 static enum vxge_hw_status
598 __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
599 {
600 u64 val64;
601 u32 i;
602 enum vxge_hw_status status = VXGE_HW_OK;
603
604 hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;
605
606 hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
607 if (hldev->toc_reg == NULL) {
608 status = VXGE_HW_FAIL;
609 goto exit;
610 }
611
612 val64 = readq(&hldev->toc_reg->toc_common_pointer);
613 hldev->common_reg =
614 (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);
615
616 val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
617 hldev->mrpcim_reg =
618 (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);
619
620 for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
621 val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
622 hldev->srpcim_reg[i] =
623 (struct vxge_hw_srpcim_reg __iomem *)
624 (hldev->bar0 + val64);
625 }
626
627 for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
628 val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
629 hldev->vpmgmt_reg[i] =
630 (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
631 }
632
633 for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
634 val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
635 hldev->vpath_reg[i] =
636 (struct vxge_hw_vpath_reg __iomem *)
637 (hldev->bar0 + val64);
638 }
639
640 val64 = readq(&hldev->toc_reg->toc_kdfc);
641
642 switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
643 case 0:
644 hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
645 VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
646 break;
647 default:
648 break;
649 }
650
651 status = __vxge_hw_device_vpath_reset_in_prog_check(
652 (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
653 exit:
654 return status;
655 }
656
657 /*
658 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
659 * This routine returns the Access Rights of the driver
660 */
661 static u32
662 __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
663 {
664 u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
665
666 switch (host_type) {
667 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
668 if (func_id == 0) {
669 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
670 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
671 }
672 break;
673 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
674 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
675 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
676 break;
677 case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
678 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
679 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
680 break;
681 case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
682 case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
683 case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
684 break;
685 case VXGE_HW_SR_VH_FUNCTION0:
686 case VXGE_HW_VH_NORMAL_FUNCTION:
687 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
688 break;
689 }
690
691 return access_rights;
692 }
693 /*
694 * __vxge_hw_device_is_privilaged
695  * This routine checks if the device function is privileged or not
696 */
697
698 enum vxge_hw_status
699 __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
700 {
701 if (__vxge_hw_device_access_rights_get(host_type,
702 func_id) &
703 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
704 return VXGE_HW_OK;
705 else
706 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
707 }
708
709 /*
710 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
711 * Returns the function number of the vpath.
712 */
713 static u32
714 __vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
715 {
716 u64 val64;
717
718 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
719
720 return
721 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
722 }
723
724 /*
725 * __vxge_hw_device_host_info_get
726 * This routine returns the host type assignments
727 */
728 static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
729 {
730 u64 val64;
731 u32 i;
732
733 val64 = readq(&hldev->common_reg->host_type_assignments);
734
735 hldev->host_type =
736 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
737
738 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
739
740 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
741 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
742 continue;
743
744 hldev->func_id =
745 __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
746
747 hldev->access_rights = __vxge_hw_device_access_rights_get(
748 hldev->host_type, hldev->func_id);
749
750 hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
751 hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
752
753 hldev->first_vp_id = i;
754 break;
755 }
756 }
757
758 /*
759 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
760 * link width and signalling rate.
761 */
762 static enum vxge_hw_status
763 __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
764 {
765 int exp_cap;
766 u16 lnk;
767
768 /* Get the negotiated link width and speed from PCI config space */
769 exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
770 pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
771
772 if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
773 return VXGE_HW_ERR_INVALID_PCI_INFO;
774
775 switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
776 case PCIE_LNK_WIDTH_RESRV:
777 case PCIE_LNK_X1:
778 case PCIE_LNK_X2:
779 case PCIE_LNK_X4:
780 case PCIE_LNK_X8:
781 break;
782 default:
783 return VXGE_HW_ERR_INVALID_PCI_INFO;
784 }
785
786 return VXGE_HW_OK;
787 }
788
789 /*
790 * __vxge_hw_device_initialize
791 * Initialize Titan-V hardware.
792 */
793 static enum vxge_hw_status
794 __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
795 {
796 enum vxge_hw_status status = VXGE_HW_OK;
797
798 if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
799 hldev->func_id)) {
800 /* Validate the pci-e link width and speed */
801 status = __vxge_hw_verify_pci_e_info(hldev);
802 if (status != VXGE_HW_OK)
803 goto exit;
804 }
805
806 exit:
807 return status;
808 }
809
810 /*
811 * __vxge_hw_vpath_fw_ver_get - Get the fw version
812 * Returns FW Version
813 */
814 static enum vxge_hw_status
815 __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
816 struct vxge_hw_device_hw_info *hw_info)
817 {
818 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
819 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
820 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
821 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
822 	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
823 enum vxge_hw_status status;
824
825 status = vxge_hw_vpath_fw_api(vpath,
826 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
827 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
828 0, &data0, &data1, &steer_ctrl);
829 if (status != VXGE_HW_OK)
830 goto exit;
831
832 fw_date->day =
833 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
834 fw_date->month =
835 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
836 fw_date->year =
837 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
838
839 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
840 fw_date->month, fw_date->day, fw_date->year);
841
842 fw_version->major =
843 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
844 fw_version->minor =
845 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
846 fw_version->build =
847 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
848
849 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
850 fw_version->major, fw_version->minor, fw_version->build);
851
852 flash_date->day =
853 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
854 flash_date->month =
855 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
856 flash_date->year =
857 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
858
859 snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
860 flash_date->month, flash_date->day, flash_date->year);
861
862 flash_version->major =
863 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
864 flash_version->minor =
865 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
866 flash_version->build =
867 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
868
869 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
870 flash_version->major, flash_version->minor,
871 flash_version->build);
872
873 exit:
874 return status;
875 }
876
877 /*
878 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
879 * part number and product description.
880 */
881 static enum vxge_hw_status
882 __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
883 struct vxge_hw_device_hw_info *hw_info)
884 {
885 enum vxge_hw_status status;
886 u64 data0, data1 = 0, steer_ctrl = 0;
887 u8 *serial_number = hw_info->serial_number;
888 u8 *part_number = hw_info->part_number;
889 u8 *product_desc = hw_info->product_desc;
890 u32 i, j = 0;
891
892 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
893
894 status = vxge_hw_vpath_fw_api(vpath,
895 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
896 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
897 0, &data0, &data1, &steer_ctrl);
898 if (status != VXGE_HW_OK)
899 return status;
900
901 ((u64 *)serial_number)[0] = be64_to_cpu(data0);
902 ((u64 *)serial_number)[1] = be64_to_cpu(data1);
903
904 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
905 data1 = steer_ctrl = 0;
906
907 status = vxge_hw_vpath_fw_api(vpath,
908 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
909 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
910 0, &data0, &data1, &steer_ctrl);
911 if (status != VXGE_HW_OK)
912 return status;
913
914 ((u64 *)part_number)[0] = be64_to_cpu(data0);
915 ((u64 *)part_number)[1] = be64_to_cpu(data1);
916
917 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
918 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
919 data0 = i;
920 data1 = steer_ctrl = 0;
921
922 status = vxge_hw_vpath_fw_api(vpath,
923 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
924 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
925 0, &data0, &data1, &steer_ctrl);
926 if (status != VXGE_HW_OK)
927 return status;
928
929 ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
930 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
931 }
932
933 return status;
934 }
935
936 /*
937 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
938 * Returns pci function mode
939 */
940 static enum vxge_hw_status
941 __vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
942 struct vxge_hw_device_hw_info *hw_info)
943 {
944 u64 data0, data1 = 0, steer_ctrl = 0;
945 enum vxge_hw_status status;
946
947 data0 = 0;
948
949 status = vxge_hw_vpath_fw_api(vpath,
950 VXGE_HW_FW_API_GET_FUNC_MODE,
951 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
952 0, &data0, &data1, &steer_ctrl);
953 if (status != VXGE_HW_OK)
954 return status;
955
956 hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
957 return status;
958 }
959
960 /*
961 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
962 * from MAC address table.
963 */
964 static enum vxge_hw_status
965 __vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
966 u8 *macaddr, u8 *macaddr_mask)
967 {
968 u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
969 data0 = 0, data1 = 0, steer_ctrl = 0;
970 enum vxge_hw_status status;
971 int i;
972
973 do {
974 status = vxge_hw_vpath_fw_api(vpath, action,
975 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
976 0, &data0, &data1, &steer_ctrl);
977 if (status != VXGE_HW_OK)
978 goto exit;
979
980 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
981 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
982 data1);
983
984 for (i = ETH_ALEN; i > 0; i--) {
985 macaddr[i - 1] = (u8) (data0 & 0xFF);
986 data0 >>= 8;
987
988 macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
989 data1 >>= 8;
990 }
991
992 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
993 data0 = 0, data1 = 0, steer_ctrl = 0;
994
995 } while (!is_valid_ether_addr(macaddr));
996 exit:
997 return status;
998 }
999
1000 /**
1001 * vxge_hw_device_hw_info_get - Get the hw information
1002 * Returns the vpath mask that has the bits set for each vpath allocated
1003  * for the driver, FW version information and the first MAC address for
1004 * each vpath
1005 */
1006 enum vxge_hw_status __devinit
1007 vxge_hw_device_hw_info_get(void __iomem *bar0,
1008 struct vxge_hw_device_hw_info *hw_info)
1009 {
1010 u32 i;
1011 u64 val64;
1012 struct vxge_hw_toc_reg __iomem *toc;
1013 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
1014 struct vxge_hw_common_reg __iomem *common_reg;
1015 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
1016 enum vxge_hw_status status;
1017 struct __vxge_hw_virtualpath vpath;
1018
1019 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
1020
1021 toc = __vxge_hw_device_toc_get(bar0);
1022 if (toc == NULL) {
1023 status = VXGE_HW_ERR_CRITICAL;
1024 goto exit;
1025 }
1026
1027 val64 = readq(&toc->toc_common_pointer);
1028 common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);
1029
1030 status = __vxge_hw_device_vpath_reset_in_prog_check(
1031 (u64 __iomem *)&common_reg->vpath_rst_in_prog);
1032 if (status != VXGE_HW_OK)
1033 goto exit;
1034
1035 hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
1036
1037 val64 = readq(&common_reg->host_type_assignments);
1038
1039 hw_info->host_type =
1040 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
1041
1042 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1043 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1044 continue;
1045
1046 val64 = readq(&toc->toc_vpmgmt_pointer[i]);
1047
1048 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
1049 (bar0 + val64);
1050
1051 hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
1052 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
1053 hw_info->func_id) &
1054 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
1055
1056 val64 = readq(&toc->toc_mrpcim_pointer);
1057
1058 mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
1059 (bar0 + val64);
1060
1061 writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
1062 wmb();
1063 }
1064
1065 val64 = readq(&toc->toc_vpath_pointer[i]);
1066
1067 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1068 (bar0 + val64);
1069 vpath.vp_open = 0;
1070
1071 status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
1072 if (status != VXGE_HW_OK)
1073 goto exit;
1074
1075 status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
1076 if (status != VXGE_HW_OK)
1077 goto exit;
1078
1079 status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
1080 if (status != VXGE_HW_OK)
1081 goto exit;
1082
1083 break;
1084 }
1085
1086 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1087 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1088 continue;
1089
1090 val64 = readq(&toc->toc_vpath_pointer[i]);
1091 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1092 (bar0 + val64);
1093 vpath.vp_open = 0;
1094
1095 status = __vxge_hw_vpath_addr_get(&vpath,
1096 hw_info->mac_addrs[i],
1097 hw_info->mac_addr_masks[i]);
1098 if (status != VXGE_HW_OK)
1099 goto exit;
1100 }
1101 exit:
1102 return status;
1103 }
1104
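/* Illustrative probe-time sketch (assumption): bar0 is the ioremapped
 * BAR 0 of the adapter, e.g.:
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *	void __iomem *bar0 = pci_ioremap_bar(pdev, 0);
 *
 *	if (bar0 &&
 *	    vxge_hw_device_hw_info_get(bar0, &hw_info) == VXGE_HW_OK)
 *		pr_info("fw %s, vpath mask 0x%llx\n",
 *			hw_info.fw_version.version,
 *			(unsigned long long)hw_info.vpath_mask);
 */
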
1105 /*
1106 * __vxge_hw_blockpool_destroy - Deallocates the block pool
1107 */
1108 static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
1109 {
1110 struct __vxge_hw_device *hldev;
1111 struct list_head *p, *n;
1113 
1114 	if (blockpool == NULL)
1115 		goto exit;
1118
1119 hldev = blockpool->hldev;
1120
1121 list_for_each_safe(p, n, &blockpool->free_block_list) {
1122 pci_unmap_single(hldev->pdev,
1123 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
1124 ((struct __vxge_hw_blockpool_entry *)p)->length,
1125 PCI_DMA_BIDIRECTIONAL);
1126
1127 vxge_os_dma_free(hldev->pdev,
1128 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
1129 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
1130
1131 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1132 kfree(p);
1133 blockpool->pool_size--;
1134 }
1135
1136 list_for_each_safe(p, n, &blockpool->free_entry_list) {
1137 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1138 kfree((void *)p);
1139 }
1141 exit:
1142 return;
1143 }
1144
1145 /*
1146 * __vxge_hw_blockpool_create - Create block pool
1147 */
1148 static enum vxge_hw_status
1149 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1150 struct __vxge_hw_blockpool *blockpool,
1151 u32 pool_size,
1152 u32 pool_max)
1153 {
1154 u32 i;
1155 struct __vxge_hw_blockpool_entry *entry = NULL;
1156 void *memblock;
1157 dma_addr_t dma_addr;
1158 struct pci_dev *dma_handle;
1159 struct pci_dev *acc_handle;
1160 enum vxge_hw_status status = VXGE_HW_OK;
1161
1162 if (blockpool == NULL) {
1163 status = VXGE_HW_FAIL;
1164 goto blockpool_create_exit;
1165 }
1166
1167 blockpool->hldev = hldev;
1168 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
1169 blockpool->pool_size = 0;
1170 blockpool->pool_max = pool_max;
1171 blockpool->req_out = 0;
1172
1173 INIT_LIST_HEAD(&blockpool->free_block_list);
1174 INIT_LIST_HEAD(&blockpool->free_entry_list);
1175
1176 for (i = 0; i < pool_size + pool_max; i++) {
1177 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1178 GFP_KERNEL);
1179 if (entry == NULL) {
1180 __vxge_hw_blockpool_destroy(blockpool);
1181 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1182 goto blockpool_create_exit;
1183 }
1184 list_add(&entry->item, &blockpool->free_entry_list);
1185 }
1186
1187 for (i = 0; i < pool_size; i++) {
1188 memblock = vxge_os_dma_malloc(
1189 hldev->pdev,
1190 VXGE_HW_BLOCK_SIZE,
1191 &dma_handle,
1192 &acc_handle);
1193 if (memblock == NULL) {
1194 __vxge_hw_blockpool_destroy(blockpool);
1195 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1196 goto blockpool_create_exit;
1197 }
1198
1199 dma_addr = pci_map_single(hldev->pdev, memblock,
1200 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
1201 if (unlikely(pci_dma_mapping_error(hldev->pdev,
1202 dma_addr))) {
1203 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
1204 __vxge_hw_blockpool_destroy(blockpool);
1205 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1206 goto blockpool_create_exit;
1207 }
1208
1209 if (!list_empty(&blockpool->free_entry_list))
1210 entry = (struct __vxge_hw_blockpool_entry *)
1211 list_first_entry(&blockpool->free_entry_list,
1212 struct __vxge_hw_blockpool_entry,
1213 item);
1214
1215 if (entry == NULL)
1216 entry =
1217 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1218 GFP_KERNEL);
1219 if (entry != NULL) {
1220 list_del(&entry->item);
1221 entry->length = VXGE_HW_BLOCK_SIZE;
1222 entry->memblock = memblock;
1223 entry->dma_addr = dma_addr;
1224 entry->acc_handle = acc_handle;
1225 entry->dma_handle = dma_handle;
1226 list_add(&entry->item,
1227 &blockpool->free_block_list);
1228 blockpool->pool_size++;
1229 } else {
1230 __vxge_hw_blockpool_destroy(blockpool);
1231 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1232 goto blockpool_create_exit;
1233 }
1234 }
1235
1236 blockpool_create_exit:
1237 return status;
1238 }
1239
1240 /*
1241 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1242 * Check the fifo configuration
1243 */
1244 static enum vxge_hw_status
1245 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1246 {
1247 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1248 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1249 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1250
1251 return VXGE_HW_OK;
1252 }
1253
1254 /*
1255 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1256 * Check the vpath configuration
1257 */
1258 static enum vxge_hw_status
1259 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1260 {
1261 enum vxge_hw_status status;
1262
1263 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1264 (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
1265 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1266
1267 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1268 if (status != VXGE_HW_OK)
1269 return status;
1270
1271 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1272 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1273 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1274 return VXGE_HW_BADCFG_VPATH_MTU;
1275
1276 if ((vp_config->rpa_strip_vlan_tag !=
1277 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1278 (vp_config->rpa_strip_vlan_tag !=
1279 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1280 (vp_config->rpa_strip_vlan_tag !=
1281 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1282 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1283
1284 return VXGE_HW_OK;
1285 }
1286
1287 /*
1288 * __vxge_hw_device_config_check - Check device configuration.
1289 * Check the device configuration
1290 */
1291 static enum vxge_hw_status
1292 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1293 {
1294 u32 i;
1295 enum vxge_hw_status status;
1296
1297 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1298 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1299 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1300 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1301 return VXGE_HW_BADCFG_INTR_MODE;
1302
1303 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1304 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1305 return VXGE_HW_BADCFG_RTS_MAC_EN;
1306
1307 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1308 status = __vxge_hw_device_vpath_config_check(
1309 &new_config->vp_config[i]);
1310 if (status != VXGE_HW_OK)
1311 return status;
1312 }
1313
1314 return VXGE_HW_OK;
1315 }
1316
1317 /*
1318 * vxge_hw_device_initialize - Initialize Titan device.
1319 * Initialize Titan device. Note that all the arguments of this public API
1320  * are 'IN', except @devh, which is an 'OUT' parameter. Driver cooperates with
1321 * OS to find new Titan device, locate its PCI and memory spaces.
1322 *
1323 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
1324 * to enable the latter to perform Titan hardware initialization.
1325 */
1326 enum vxge_hw_status __devinit
1327 vxge_hw_device_initialize(
1328 struct __vxge_hw_device **devh,
1329 struct vxge_hw_device_attr *attr,
1330 struct vxge_hw_device_config *device_config)
1331 {
1332 u32 i;
1333 u32 nblocks = 0;
1334 struct __vxge_hw_device *hldev = NULL;
1335 enum vxge_hw_status status = VXGE_HW_OK;
1336
1337 status = __vxge_hw_device_config_check(device_config);
1338 if (status != VXGE_HW_OK)
1339 goto exit;
1340
1341 hldev = vzalloc(sizeof(struct __vxge_hw_device));
1342 if (hldev == NULL) {
1343 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1344 goto exit;
1345 }
1346
1347 hldev->magic = VXGE_HW_DEVICE_MAGIC;
1348
1349 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
1350
1351 /* apply config */
1352 memcpy(&hldev->config, device_config,
1353 sizeof(struct vxge_hw_device_config));
1354
1355 hldev->bar0 = attr->bar0;
1356 hldev->pdev = attr->pdev;
1357
1358 hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
1359 hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
1360 hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
1361
1362 __vxge_hw_device_pci_e_init(hldev);
1363
1364 status = __vxge_hw_device_reg_addr_get(hldev);
1365 if (status != VXGE_HW_OK) {
1366 vfree(hldev);
1367 goto exit;
1368 }
1369
1370 __vxge_hw_device_host_info_get(hldev);
1371
1372 /* Incrementing for stats blocks */
1373 nblocks++;
1374
1375 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1376 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
1377 continue;
1378
1379 if (device_config->vp_config[i].ring.enable ==
1380 VXGE_HW_RING_ENABLE)
1381 nblocks += device_config->vp_config[i].ring.ring_blocks;
1382
1383 if (device_config->vp_config[i].fifo.enable ==
1384 VXGE_HW_FIFO_ENABLE)
1385 nblocks += device_config->vp_config[i].fifo.fifo_blocks;
1386 nblocks++;
1387 }
1388
1389 if (__vxge_hw_blockpool_create(hldev,
1390 &hldev->block_pool,
1391 device_config->dma_blockpool_initial + nblocks,
1392 device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
1393
1394 vxge_hw_device_terminate(hldev);
1395 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1396 goto exit;
1397 }
1398
1399 status = __vxge_hw_device_initialize(hldev);
1400 if (status != VXGE_HW_OK) {
1401 vxge_hw_device_terminate(hldev);
1402 goto exit;
1403 }
1404
1405 *devh = hldev;
1406 exit:
1407 return status;
1408 }
1409
1410 /*
1411 * vxge_hw_device_terminate - Terminate Titan device.
1412 * Terminate HW device.
1413 */
1414 void
1415 vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
1416 {
1417 vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
1418
1419 hldev->magic = VXGE_HW_DEVICE_DEAD;
1420 __vxge_hw_blockpool_destroy(&hldev->block_pool);
1421 vfree(hldev);
1422 }
1423
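/* Illustrative pairing sketch (assumption): the two calls above bracket
 * the life of the HW device object:
 *
 *	struct __vxge_hw_device *devh;
 *
 *	status = vxge_hw_device_initialize(&devh, &attr, device_config);
 *	if (status == VXGE_HW_OK) {
 *		...
 *		vxge_hw_device_terminate(devh);
 *	}
 */
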
1424 /*
1425 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
1426 * and offset and perform an operation
1427 */
1428 static enum vxge_hw_status
1429 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
1430 u32 operation, u32 offset, u64 *stat)
1431 {
1432 u64 val64;
1433 enum vxge_hw_status status = VXGE_HW_OK;
1434 struct vxge_hw_vpath_reg __iomem *vp_reg;
1435
1436 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1437 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1438 goto vpath_stats_access_exit;
1439 }
1440
1441 vp_reg = vpath->vp_reg;
1442
1443 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
1444 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
1445 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
1446
1447 status = __vxge_hw_pio_mem_write64(val64,
1448 &vp_reg->xmac_stats_access_cmd,
1449 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
1450 vpath->hldev->config.device_poll_millis);
1451 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1452 *stat = readq(&vp_reg->xmac_stats_access_data);
1453 else
1454 *stat = 0;
1455
1456 vpath_stats_access_exit:
1457 return status;
1458 }
1459
1460 /*
1461 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
1462 */
1463 static enum vxge_hw_status
1464 __vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
1465 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
1466 {
1467 u64 *val64;
1468 int i;
1469 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
1470 enum vxge_hw_status status = VXGE_HW_OK;
1471
1472 val64 = (u64 *)vpath_tx_stats;
1473
1474 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1475 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1476 goto exit;
1477 }
1478
1479 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
1480 status = __vxge_hw_vpath_stats_access(vpath,
1481 VXGE_HW_STATS_OP_READ,
1482 offset, val64);
1483 if (status != VXGE_HW_OK)
1484 goto exit;
1485 offset++;
1486 val64++;
1487 }
1488 exit:
1489 return status;
1490 }
1491
1492 /*
1493 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
1494 */
1495 static enum vxge_hw_status
1496 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
1497 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
1498 {
1499 u64 *val64;
1500 enum vxge_hw_status status = VXGE_HW_OK;
1501 int i;
1502 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
1503 val64 = (u64 *) vpath_rx_stats;
1504
1505 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1506 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1507 goto exit;
1508 }
1509 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
1510 status = __vxge_hw_vpath_stats_access(vpath,
1511 VXGE_HW_STATS_OP_READ,
1512 offset >> 3, val64);
1513 if (status != VXGE_HW_OK)
1514 goto exit;
1515
1516 offset += 8;
1517 val64++;
1518 }
1519 exit:
1520 return status;
1521 }
1522
1523 /*
1524 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
1525 */
1526 static enum vxge_hw_status
1527 __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
1528 struct vxge_hw_vpath_stats_hw_info *hw_stats)
1529 {
1530 u64 val64;
1531 enum vxge_hw_status status = VXGE_HW_OK;
1532 struct vxge_hw_vpath_reg __iomem *vp_reg;
1533
1534 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1535 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1536 goto exit;
1537 }
1538 vp_reg = vpath->vp_reg;
1539
1540 val64 = readq(&vp_reg->vpath_debug_stats0);
1541 hw_stats->ini_num_mwr_sent =
1542 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
1543
1544 val64 = readq(&vp_reg->vpath_debug_stats1);
1545 hw_stats->ini_num_mrd_sent =
1546 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
1547
1548 val64 = readq(&vp_reg->vpath_debug_stats2);
1549 hw_stats->ini_num_cpl_rcvd =
1550 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
1551
1552 val64 = readq(&vp_reg->vpath_debug_stats3);
1553 hw_stats->ini_num_mwr_byte_sent =
1554 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
1555
1556 val64 = readq(&vp_reg->vpath_debug_stats4);
1557 hw_stats->ini_num_cpl_byte_rcvd =
1558 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
1559
1560 val64 = readq(&vp_reg->vpath_debug_stats5);
1561 hw_stats->wrcrdtarb_xoff =
1562 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
1563
1564 val64 = readq(&vp_reg->vpath_debug_stats6);
1565 hw_stats->rdcrdtarb_xoff =
1566 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
1567
1568 val64 = readq(&vp_reg->vpath_genstats_count01);
1569 hw_stats->vpath_genstats_count0 =
1570 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
1571 val64);
1572
1573 val64 = readq(&vp_reg->vpath_genstats_count01);
1574 hw_stats->vpath_genstats_count1 =
1575 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
1576 val64);
1577
1578 val64 = readq(&vp_reg->vpath_genstats_count23);
1579 hw_stats->vpath_genstats_count2 =
1580 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
1581 val64);
1582
1583 	val64 = readq(&vp_reg->vpath_genstats_count23);
1584 hw_stats->vpath_genstats_count3 =
1585 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
1586 val64);
1587
1588 val64 = readq(&vp_reg->vpath_genstats_count4);
1589 hw_stats->vpath_genstats_count4 =
1590 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
1591 val64);
1592
1593 val64 = readq(&vp_reg->vpath_genstats_count5);
1594 hw_stats->vpath_genstats_count5 =
1595 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
1596 val64);
1597
1598 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
1599 if (status != VXGE_HW_OK)
1600 goto exit;
1601
1602 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
1603 if (status != VXGE_HW_OK)
1604 goto exit;
1605
1606 VXGE_HW_VPATH_STATS_PIO_READ(
1607 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
1608
1609 hw_stats->prog_event_vnum0 =
1610 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
1611
1612 hw_stats->prog_event_vnum1 =
1613 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
1614
1615 VXGE_HW_VPATH_STATS_PIO_READ(
1616 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
1617
1618 hw_stats->prog_event_vnum2 =
1619 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
1620
1621 hw_stats->prog_event_vnum3 =
1622 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
1623
1624 val64 = readq(&vp_reg->rx_multi_cast_stats);
1625 hw_stats->rx_multi_cast_frame_discard =
1626 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
1627
1628 val64 = readq(&vp_reg->rx_frm_transferred);
1629 hw_stats->rx_frm_transferred =
1630 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
1631
1632 val64 = readq(&vp_reg->rxd_returned);
1633 hw_stats->rxd_returned =
1634 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
1635
1636 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
1637 hw_stats->rx_mpa_len_fail_frms =
1638 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
1639 hw_stats->rx_mpa_mrk_fail_frms =
1640 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
1641 hw_stats->rx_mpa_crc_fail_frms =
1642 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
1643
1644 val64 = readq(&vp_reg->dbg_stats_rx_fau);
1645 hw_stats->rx_permitted_frms =
1646 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
1647 hw_stats->rx_vp_reset_discarded_frms =
1648 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
1649 hw_stats->rx_wol_frms =
1650 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
1651
1652 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
1653 hw_stats->tx_vp_reset_discarded_frms =
1654 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
1655 val64);
1656 exit:
1657 return status;
1658 }
1659
1660 /*
1661 * vxge_hw_device_stats_get - Get the device hw statistics.
1662 * Returns the vpath h/w stats for the device.
1663 */
1664 enum vxge_hw_status
1665 vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
1666 struct vxge_hw_device_stats_hw_info *hw_stats)
1667 {
1668 u32 i;
1669 enum vxge_hw_status status = VXGE_HW_OK;
1670
1671 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1672 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
1673 (hldev->virtual_paths[i].vp_open ==
1674 VXGE_HW_VP_NOT_OPEN))
1675 continue;
1676
1677 memcpy(hldev->virtual_paths[i].hw_stats_sav,
1678 hldev->virtual_paths[i].hw_stats,
1679 sizeof(struct vxge_hw_vpath_stats_hw_info));
1680
1681 status = __vxge_hw_vpath_stats_get(
1682 &hldev->virtual_paths[i],
1683 hldev->virtual_paths[i].hw_stats);
1684 }
1685
1686 memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
1687 sizeof(struct vxge_hw_device_stats_hw_info));
1688
1689 return status;
1690 }
1691
1692 /*
1693 * vxge_hw_driver_stats_get - Get the device sw statistics.
1694 * Returns the vpath s/w stats for the device.
1695 */
1696 enum vxge_hw_status vxge_hw_driver_stats_get(
1697 struct __vxge_hw_device *hldev,
1698 struct vxge_hw_device_stats_sw_info *sw_stats)
1699 {
1700 enum vxge_hw_status status = VXGE_HW_OK;
1701
1702 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
1703 sizeof(struct vxge_hw_device_stats_sw_info));
1704
1705 return status;
1706 }
1707
1708 /*
1709 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
1710 * and offset and perform an operation
1711 * Get the statistics from the given location and offset.
1712 */
1713 enum vxge_hw_status
1714 vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
1715 u32 operation, u32 location, u32 offset, u64 *stat)
1716 {
1717 u64 val64;
1718 enum vxge_hw_status status = VXGE_HW_OK;
1719
1720 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1721 hldev->func_id);
1722 if (status != VXGE_HW_OK)
1723 goto exit;
1724
1725 val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
1726 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
1727 VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
1728 VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
1729
1730 status = __vxge_hw_pio_mem_write64(val64,
1731 &hldev->mrpcim_reg->xmac_stats_sys_cmd,
1732 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
1733 hldev->config.device_poll_millis);
1734
1735 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1736 *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
1737 else
1738 *stat = 0;
1739 exit:
1740 return status;
1741 }
1742
1743 /*
1744 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
1745 * Get the Statistics on aggregate port
1746 */
1747 static enum vxge_hw_status
1748 vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
1749 struct vxge_hw_xmac_aggr_stats *aggr_stats)
1750 {
1751 u64 *val64;
1752 int i;
1753 u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
1754 enum vxge_hw_status status = VXGE_HW_OK;
1755
1756 val64 = (u64 *)aggr_stats;
1757
1758 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1759 hldev->func_id);
1760 if (status != VXGE_HW_OK)
1761 goto exit;
1762
1763 for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
1764 status = vxge_hw_mrpcim_stats_access(hldev,
1765 VXGE_HW_STATS_OP_READ,
1766 VXGE_HW_STATS_LOC_AGGR,
1767 ((offset + (104 * port)) >> 3), val64);
1768 if (status != VXGE_HW_OK)
1769 goto exit;
1770
1771 offset += 8;
1772 val64++;
1773 }
1774 exit:
1775 return status;
1776 }
1777
1778 /*
1779 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
1780 * Get the Statistics on port
1781 */
1782 static enum vxge_hw_status
1783 vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
1784 struct vxge_hw_xmac_port_stats *port_stats)
1785 {
1786 u64 *val64;
1787 enum vxge_hw_status status = VXGE_HW_OK;
1788 int i;
1789 u32 offset = 0x0;
1790 val64 = (u64 *) port_stats;
1791
1792 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1793 hldev->func_id);
1794 if (status != VXGE_HW_OK)
1795 goto exit;
1796
1797 for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
1798 status = vxge_hw_mrpcim_stats_access(hldev,
1799 VXGE_HW_STATS_OP_READ,
1800 VXGE_HW_STATS_LOC_AGGR,
1801 ((offset + (608 * port)) >> 3), val64);
1802 if (status != VXGE_HW_OK)
1803 goto exit;
1804
1805 offset += 8;
1806 val64++;
1807 }
1808
1809 exit:
1810 return status;
1811 }
1812
1813 /*
1814 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
1815 * Get the XMAC Statistics
1816 */
1817 enum vxge_hw_status
1818 vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1819 struct vxge_hw_xmac_stats *xmac_stats)
1820 {
1821 enum vxge_hw_status status = VXGE_HW_OK;
1822 u32 i;
1823
1824 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1825 0, &xmac_stats->aggr_stats[0]);
1826 if (status != VXGE_HW_OK)
1827 goto exit;
1828
1829 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1830 1, &xmac_stats->aggr_stats[1]);
1831 if (status != VXGE_HW_OK)
1832 goto exit;
1833
1834 for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
1835
1836 status = vxge_hw_device_xmac_port_stats_get(hldev,
1837 i, &xmac_stats->port_stats[i]);
1838 if (status != VXGE_HW_OK)
1839 goto exit;
1840 }
1841
1842 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1843
1844 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
1845 continue;
1846
1847 status = __vxge_hw_vpath_xmac_tx_stats_get(
1848 &hldev->virtual_paths[i],
1849 &xmac_stats->vpath_tx_stats[i]);
1850 if (status != VXGE_HW_OK)
1851 goto exit;
1852
1853 status = __vxge_hw_vpath_xmac_rx_stats_get(
1854 &hldev->virtual_paths[i],
1855 &xmac_stats->vpath_rx_stats[i]);
1856 if (status != VXGE_HW_OK)
1857 goto exit;
1858 }
1859 exit:
1860 return status;
1861 }
1862
1863 /*
1864 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
1865 * This routine is used to dynamically change the debug output
1866 */
1867 void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
1868 enum vxge_debug_level level, u32 mask)
1869 {
1870 if (hldev == NULL)
1871 return;
1872
1873 #if defined(VXGE_DEBUG_TRACE_MASK) || \
1874 defined(VXGE_DEBUG_ERR_MASK)
1875 hldev->debug_module_mask = mask;
1876 hldev->debug_level = level;
1877 #endif
1878
1879 #if defined(VXGE_DEBUG_ERR_MASK)
1880 hldev->level_err = level & VXGE_ERR;
1881 #endif
1882
1883 #if defined(VXGE_DEBUG_TRACE_MASK)
1884 hldev->level_trace = level & VXGE_TRACE;
1885 #endif
1886 }
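/*
 * Usage sketch (illustrative only): raise verbosity for all modules.
 * This assumes VXGE_TRACE and VXGE_ERR are OR-able flags (as the
 * masking above implies) and that VXGE_COMPONENT_ALL is the catch-all
 * module mask; the call is a no-op unless the driver was built with
 * VXGE_DEBUG_TRACE_MASK and/or VXGE_DEBUG_ERR_MASK defined.
 *
 *	vxge_hw_device_debug_set(hldev, VXGE_ERR | VXGE_TRACE,
 *				 VXGE_COMPONENT_ALL);
 */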
1887
1888 /*
1889 * vxge_hw_device_error_level_get - Get the error level
1890 * This routine returns the current error level set
1891 */
1892 u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
1893 {
1894 #if defined(VXGE_DEBUG_ERR_MASK)
1895 if (hldev == NULL)
1896 return VXGE_ERR;
1897 else
1898 return hldev->level_err;
1899 #else
1900 return 0;
1901 #endif
1902 }
1903
1904 /*
1905 * vxge_hw_device_trace_level_get - Get the trace level
1906 * This routine returns the current trace level set
1907 */
1908 u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
1909 {
1910 #if defined(VXGE_DEBUG_TRACE_MASK)
1911 if (hldev == NULL)
1912 return VXGE_TRACE;
1913 else
1914 return hldev->level_trace;
1915 #else
1916 return 0;
1917 #endif
1918 }
1919
1920 /*
1921 * vxge_hw_device_getpause_data - Pause frame generation and reception.
1922 * Returns the Pause frame generation and reception capability of the NIC.
1923 */
1924 enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
1925 u32 port, u32 *tx, u32 *rx)
1926 {
1927 u64 val64;
1928 enum vxge_hw_status status = VXGE_HW_OK;
1929
1930 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1931 status = VXGE_HW_ERR_INVALID_DEVICE;
1932 goto exit;
1933 }
1934
1935 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1936 status = VXGE_HW_ERR_INVALID_PORT;
1937 goto exit;
1938 }
1939
1940 if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
1941 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
1942 goto exit;
1943 }
1944
1945 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1946 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
1947 *tx = 1;
1948 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
1949 *rx = 1;
1950 exit:
1951 return status;
1952 }
1953
1954 /*
1955 * vxge_hw_device_setpause_data - set/reset pause frame generation.
1956 * It can be used to set or reset Pause frame generation or reception
1957 * support of the NIC.
1958 */
1959 enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1960 u32 port, u32 tx, u32 rx)
1961 {
1962 u64 val64;
1963 enum vxge_hw_status status = VXGE_HW_OK;
1964
1965 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1966 status = VXGE_HW_ERR_INVALID_DEVICE;
1967 goto exit;
1968 }
1969
1970 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1971 status = VXGE_HW_ERR_INVALID_PORT;
1972 goto exit;
1973 }
1974
1975 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1976 hldev->func_id);
1977 if (status != VXGE_HW_OK)
1978 goto exit;
1979
1980 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1981 if (tx)
1982 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1983 else
1984 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1985 if (rx)
1986 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1987 else
1988 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1989
1990 writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1991 exit:
1992 return status;
1993 }
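/*
 * Usage sketch (illustrative only): enable pause in both directions on
 * port 0 and read the setting back. Note that the getter only sets
 * *tx/*rx to 1, so the caller must zero-initialize them; a
 * non-privileged function gets VXGE_HW_ERR_PRIVILAGED_OPEARATION.
 *
 *	u32 tx = 0, rx = 0;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_device_setpause_data(hldev, 0, 1, 1);
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_device_getpause_data(hldev, 0, &tx, &rx);
 */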
1994
1995 u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1996 {
1997 int link_width, exp_cap;
1998 u16 lnk;
1999
2000 exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
2001 pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
2002 link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
2003 return link_width;
2004 }
2005
2006 /*
2007 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
2008 * This function returns the index of memory block
2009 */
2010 static inline u32
2011 __vxge_hw_ring_block_memblock_idx(u8 *block)
2012 {
2013 return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
2014 }
2015
2016 /*
2017 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
2018 * This function stores the memblock index in a memory block
2019 */
2020 static inline void
2021 __vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
2022 {
2023 *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
2024 }
2025
2026 /*
2027 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
2028 * in the RxD block
2030 */
2031 static inline void
2032 __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
2033 {
2034 *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
2035 }
2036
2037 /*
2038 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
2039 * first RxD block
2041 */
2042 static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
2043 {
2044 struct vxge_hw_mempool_dma *dma_object;
2045
2046 dma_object = ring->mempool->memblocks_dma_arr;
2047 vxge_assert(dma_object != NULL);
2048
2049 return dma_object->addr;
2050 }
2051
2052 /*
2053 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
2054 * This function returns the dma address of a given item
2055 */
2056 static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
2057 void *item)
2058 {
2059 u32 memblock_idx;
2060 void *memblock;
2061 struct vxge_hw_mempool_dma *memblock_dma_object;
2062 ptrdiff_t dma_item_offset;
2063
2064 /* get owner memblock index */
2065 memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
2066
2067 /* get owner memblock by memblock index */
2068 memblock = mempoolh->memblocks_arr[memblock_idx];
2069
2070 /* get memblock DMA object by memblock index */
2071 memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
2072
2073 /* calculate offset in the memblock of this item */
2074 dma_item_offset = (u8 *)item - (u8 *)memblock;
2075
2076 return memblock_dma_object->addr + dma_item_offset;
2077 }
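/* Worked example with hypothetical numbers: if the owning memblock is
 * mapped at DMA address 0x10000000 and the item sits 512 bytes into
 * that memblock, the item's DMA address is 0x10000000 + 512 =
 * 0x10000200. Only the per-memblock DMA base needs to be tracked;
 * item addresses follow from their byte offset, which is why each RxD
 * block records its owning memblock index in its trailer.
 */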
2078
2079 /*
2080 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
2081 * This function points the "from" RxD block at the DMA address of the "to" block
2082 */
2083 static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
2084 struct __vxge_hw_ring *ring, u32 from,
2085 u32 to)
2086 {
2087 u8 *to_item, *from_item;
2088 dma_addr_t to_dma;
2089
2090 /* get "from" RxD block */
2091 from_item = mempoolh->items_arr[from];
2092 vxge_assert(from_item);
2093
2094 /* get "to" RxD block */
2095 to_item = mempoolh->items_arr[to];
2096 vxge_assert(to_item);
2097
2098 /* get the DMA start address of the "to" RxD block */
2099 to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
2100
2101 /* set the next-block pointer of the "from" RxD block to the
2102 * "to" block's DMA start address */
2103 __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
2104 }
2105
2106 /*
2107 * __vxge_hw_ring_mempool_item_alloc - Mempool item-alloc callback for
2108 * RxD blocks
2109 * This callback is passed to __vxge_hw_mempool_create to format each
2110 * RxD block of the ring's memory pool
2111 */
2112 static void
2113 __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
2114 u32 memblock_index,
2115 struct vxge_hw_mempool_dma *dma_object,
2116 u32 index, u32 is_last)
2117 {
2118 u32 i;
2119 void *item = mempoolh->items_arr[index];
2120 struct __vxge_hw_ring *ring =
2121 (struct __vxge_hw_ring *)mempoolh->userdata;
2122
2123 /* format rxds array */
2124 for (i = 0; i < ring->rxds_per_block; i++) {
2125 void *rxdblock_priv;
2126 void *uld_priv;
2127 struct vxge_hw_ring_rxd_1 *rxdp;
2128
2129 u32 reserve_index = ring->channel.reserve_ptr -
2130 (index * ring->rxds_per_block + i + 1);
2131 u32 memblock_item_idx;
2132
2133 ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
2134 i * ring->rxd_size;
2135
2136 /* Note: memblock_item_idx is index of the item within
2137 * the memblock. For instance, in case of three RxD-blocks
2138 * per memblock this value can be 0, 1 or 2. */
2139 rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
2140 memblock_index, item,
2141 &memblock_item_idx);
2142
2143 rxdp = (struct vxge_hw_ring_rxd_1 *)
2144 ring->channel.reserve_arr[reserve_index];
2145
2146 uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
2147
2148 /* pre-format Host_Control */
2149 rxdp->host_control = (u64)(size_t)uld_priv;
2150 }
2151
2152 __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
2153
2154 if (is_last) {
2155 /* link last one with first one */
2156 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
2157 }
2158
2159 if (index > 0) {
2160 /* link this RxD block with previous one */
2161 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
2162 }
2163 }
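/* With, say, three RxD blocks the next-pointer chain built here is
 * 0 -> 1 -> 2 -> 0: formatting block N back-links block N - 1 to it,
 * and formatting the last block closes the ring back to block 0.
 */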
2164
2165 /*
2166 * vxge_hw_ring_replenish - Initial replenish of RxDs
2167 * This function replenishes the RxDs from reserve array to work array
2168 */
2169 enum vxge_hw_status
2170 vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
2171 {
2172 void *rxd;
2173 struct __vxge_hw_channel *channel;
2174 enum vxge_hw_status status = VXGE_HW_OK;
2175
2176 channel = &ring->channel;
2177
2178 while (vxge_hw_channel_dtr_count(channel) > 0) {
2179
2180 status = vxge_hw_ring_rxd_reserve(ring, &rxd);
2181
2182 vxge_assert(status == VXGE_HW_OK);
2183
2184 if (ring->rxd_init) {
2185 status = ring->rxd_init(rxd, channel->userdata);
2186 if (status != VXGE_HW_OK) {
2187 vxge_hw_ring_rxd_free(ring, rxd);
2188 goto exit;
2189 }
2190 }
2191
2192 vxge_hw_ring_rxd_post(ring, rxd);
2193 }
2194 status = VXGE_HW_OK;
2195 exit:
2196 return status;
2197 }
2198
2199 /*
2200 * __vxge_hw_channel_allocate - Allocate memory for channel
2201 * This function allocates required memory for the channel and various arrays
2202 * in the channel
2203 */
2204 static struct __vxge_hw_channel *
2205 __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
2206 enum __vxge_hw_channel_type type,
2207 u32 length, u32 per_dtr_space,
2208 void *userdata)
2209 {
2210 struct __vxge_hw_channel *channel;
2211 struct __vxge_hw_device *hldev;
2212 int size = 0;
2213 u32 vp_id;
2214
2215 hldev = vph->vpath->hldev;
2216 vp_id = vph->vpath->vp_id;
2217
2218 switch (type) {
2219 case VXGE_HW_CHANNEL_TYPE_FIFO:
2220 size = sizeof(struct __vxge_hw_fifo);
2221 break;
2222 case VXGE_HW_CHANNEL_TYPE_RING:
2223 size = sizeof(struct __vxge_hw_ring);
2224 break;
2225 default:
2226 break;
2227 }
2228
2229 channel = kzalloc(size, GFP_KERNEL);
2230 if (channel == NULL)
2231 goto exit0;
2232 INIT_LIST_HEAD(&channel->item);
2233
2234 channel->common_reg = hldev->common_reg;
2235 channel->first_vp_id = hldev->first_vp_id;
2236 channel->type = type;
2237 channel->devh = hldev;
2238 channel->vph = vph;
2239 channel->userdata = userdata;
2240 channel->per_dtr_space = per_dtr_space;
2241 channel->length = length;
2242 channel->vp_id = vp_id;
2243
2244 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2245 if (channel->work_arr == NULL)
2246 goto exit1;
2247
2248 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2249 if (channel->free_arr == NULL)
2250 goto exit1;
2251 channel->free_ptr = length;
2252
2253 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2254 if (channel->reserve_arr == NULL)
2255 goto exit1;
2256 channel->reserve_ptr = length;
2257 channel->reserve_top = 0;
2258
2259 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2260 if (channel->orig_arr == NULL)
2261 goto exit1;
2262
2263 return channel;
2264 exit1:
2265 __vxge_hw_channel_free(channel);
2266
2267 exit0:
2268 return NULL;
2269 }
2270
2271 /*
2272 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
2273 * Adds a block to block pool
2274 */
2275 static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
2276 void *block_addr,
2277 u32 length,
2278 struct pci_dev *dma_h,
2279 struct pci_dev *acc_handle)
2280 {
2281 struct __vxge_hw_blockpool *blockpool;
2282 struct __vxge_hw_blockpool_entry *entry = NULL;
2283 dma_addr_t dma_addr;
2284 enum vxge_hw_status status = VXGE_HW_OK;
2286
2287 blockpool = &devh->block_pool;
2288
2289 if (block_addr == NULL) {
2290 blockpool->req_out--;
2291 status = VXGE_HW_FAIL;
2292 goto exit;
2293 }
2294
2295 dma_addr = pci_map_single(devh->pdev, block_addr, length,
2296 PCI_DMA_BIDIRECTIONAL);
2297
2298 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
2299 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
2300 blockpool->req_out--;
2301 status = VXGE_HW_FAIL;
2302 goto exit;
2303 }
2304
2305 if (!list_empty(&blockpool->free_entry_list))
2306 entry = (struct __vxge_hw_blockpool_entry *)
2307 list_first_entry(&blockpool->free_entry_list,
2308 struct __vxge_hw_blockpool_entry,
2309 item);
2310
2311 if (entry == NULL)
2312 entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
2313 else
2314 list_del(&entry->item);
2315
2316 if (entry != NULL) {
2317 entry->length = length;
2318 entry->memblock = block_addr;
2319 entry->dma_addr = dma_addr;
2320 entry->acc_handle = acc_handle;
2321 entry->dma_handle = dma_h;
2322 list_add(&entry->item, &blockpool->free_block_list);
2323 blockpool->pool_size++;
2324 status = VXGE_HW_OK;
2325 } else
2326 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2327
2328 blockpool->req_out--;
2329
2331 exit:
2332 return;
2333 }
2334
2335 static inline void
2336 vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
2337 {
2338 gfp_t flags;
2339 void *vaddr;
2340
2341 if (in_interrupt())
2342 flags = GFP_ATOMIC | GFP_DMA;
2343 else
2344 flags = GFP_KERNEL | GFP_DMA;
2345
2346 vaddr = kmalloc((size), flags);
2347
2348 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
2349 }
2350
2351 /*
2352 * __vxge_hw_blockpool_blocks_add - Request additional blocks
2353 */
2354 static
2355 void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
2356 {
2357 u32 nreq = 0, i;
2358
2359 if ((blockpool->pool_size + blockpool->req_out) <
2360 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
2361 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
2362 blockpool->req_out += nreq;
2363 }
2364
2365 for (i = 0; i < nreq; i++)
2366 vxge_os_dma_malloc_async(
2367 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2368 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
2369 }
2370
2371 /*
2372 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
2373 * Allocates a block of memory of given size, either from block pool
2374 * or by calling vxge_os_dma_malloc()
2375 */
2376 static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
2377 struct vxge_hw_mempool_dma *dma_object)
2378 {
2379 struct __vxge_hw_blockpool_entry *entry = NULL;
2380 struct __vxge_hw_blockpool *blockpool;
2381 void *memblock = NULL;
2382 enum vxge_hw_status status = VXGE_HW_OK;
2383
2384 blockpool = &devh->block_pool;
2385
2386 if (size != blockpool->block_size) {
2387
2388 memblock = vxge_os_dma_malloc(devh->pdev, size,
2389 &dma_object->handle,
2390 &dma_object->acc_handle);
2391
2392 if (memblock == NULL) {
2393 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2394 goto exit;
2395 }
2396
2397 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
2398 PCI_DMA_BIDIRECTIONAL);
2399
2400 if (unlikely(pci_dma_mapping_error(devh->pdev,
2401 dma_object->addr))) {
2402 vxge_os_dma_free(devh->pdev, memblock,
2403 &dma_object->acc_handle);
2404 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2405 goto exit;
2406 }
2407
2408 } else {
2409
2410 if (!list_empty(&blockpool->free_block_list))
2411 entry = (struct __vxge_hw_blockpool_entry *)
2412 list_first_entry(&blockpool->free_block_list,
2413 struct __vxge_hw_blockpool_entry,
2414 item);
2415
2416 if (entry != NULL) {
2417 list_del(&entry->item);
2418 dma_object->addr = entry->dma_addr;
2419 dma_object->handle = entry->dma_handle;
2420 dma_object->acc_handle = entry->acc_handle;
2421 memblock = entry->memblock;
2422
2423 list_add(&entry->item,
2424 &blockpool->free_entry_list);
2425 blockpool->pool_size--;
2426 }
2427
2428 if (memblock != NULL)
2429 __vxge_hw_blockpool_blocks_add(blockpool);
2430 }
2431 exit:
2432 return memblock;
2433 }
2434
2435 /*
2436 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
2437 */
2438 static void
2439 __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
2440 {
2441 struct list_head *p, *n;
2442
2443 list_for_each_safe(p, n, &blockpool->free_block_list) {
2444
2445 if (blockpool->pool_size < blockpool->pool_max)
2446 break;
2447
2448 pci_unmap_single(
2449 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2450 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
2451 ((struct __vxge_hw_blockpool_entry *)p)->length,
2452 PCI_DMA_BIDIRECTIONAL);
2453
2454 vxge_os_dma_free(
2455 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2456 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
2457 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
2458
2459 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
2460
2461 list_add(p, &blockpool->free_entry_list);
2462
2463 blockpool->pool_size--;
2464
2465 }
2466 }
2467
2468 /*
2469 * __vxge_hw_blockpool_free - Frees the memory allocated with
2470 * __vxge_hw_blockpool_malloc
2471 */
2472 static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
2473 void *memblock, u32 size,
2474 struct vxge_hw_mempool_dma *dma_object)
2475 {
2476 struct __vxge_hw_blockpool_entry *entry = NULL;
2477 struct __vxge_hw_blockpool *blockpool;
2478 enum vxge_hw_status status = VXGE_HW_OK;
2479
2480 blockpool = &devh->block_pool;
2481
2482 if (size != blockpool->block_size) {
2483 pci_unmap_single(devh->pdev, dma_object->addr, size,
2484 PCI_DMA_BIDIRECTIONAL);
2485 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
2486 } else {
2487
2488 if (!list_empty(&blockpool->free_entry_list))
2489 entry = (struct __vxge_hw_blockpool_entry *)
2490 list_first_entry(&blockpool->free_entry_list,
2491 struct __vxge_hw_blockpool_entry,
2492 item);
2493
2494 if (entry == NULL)
2495 entry = vmalloc(sizeof(
2496 struct __vxge_hw_blockpool_entry));
2497 else
2498 list_del(&entry->item);
2499
2500 if (entry != NULL) {
2501 entry->length = size;
2502 entry->memblock = memblock;
2503 entry->dma_addr = dma_object->addr;
2504 entry->acc_handle = dma_object->acc_handle;
2505 entry->dma_handle = dma_object->handle;
2506 list_add(&entry->item,
2507 &blockpool->free_block_list);
2508 blockpool->pool_size++;
2509 status = VXGE_HW_OK;
2510 } else
2511 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2512
2513 if (status == VXGE_HW_OK)
2514 __vxge_hw_blockpool_blocks_remove(blockpool);
2515 }
2516 }
2517
2518 /*
2519 * __vxge_hw_mempool_destroy
2520 */
2521 static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
2522 {
2523 u32 i, j;
2524 struct __vxge_hw_device *devh = mempool->devh;
2525
2526 for (i = 0; i < mempool->memblocks_allocated; i++) {
2527 struct vxge_hw_mempool_dma *dma_object;
2528
2529 vxge_assert(mempool->memblocks_arr[i]);
2530 vxge_assert(mempool->memblocks_dma_arr + i);
2531
2532 dma_object = mempool->memblocks_dma_arr + i;
2533
2534 for (j = 0; j < mempool->items_per_memblock; j++) {
2535 u32 index = i * mempool->items_per_memblock + j;
2536
2537 /* to skip last partially filled (if any) memblock */
2538 if (index >= mempool->items_current)
2539 break;
2540 }
2541
2542 vfree(mempool->memblocks_priv_arr[i]);
2543
2544 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
2545 mempool->memblock_size, dma_object);
2546 }
2547
2548 vfree(mempool->items_arr);
2549 vfree(mempool->memblocks_dma_arr);
2550 vfree(mempool->memblocks_priv_arr);
2551 vfree(mempool->memblocks_arr);
2552 vfree(mempool);
2553 }
2554
2555 /*
2556 * __vxge_hw_mempool_grow
2557 * Will grow the mempool by up to %num_allocate memblocks.
2558 */
2559 static enum vxge_hw_status
2560 __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
2561 u32 *num_allocated)
2562 {
2563 u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
2564 u32 n_items = mempool->items_per_memblock;
2565 u32 start_block_idx = mempool->memblocks_allocated;
2566 u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
2567 enum vxge_hw_status status = VXGE_HW_OK;
2568
2569 *num_allocated = 0;
2570
2571 if (end_block_idx > mempool->memblocks_max) {
2572 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2573 goto exit;
2574 }
2575
2576 for (i = start_block_idx; i < end_block_idx; i++) {
2577 u32 j;
2578 u32 is_last = ((end_block_idx - 1) == i);
2579 struct vxge_hw_mempool_dma *dma_object =
2580 mempool->memblocks_dma_arr + i;
2581 void *the_memblock;
2582
2583 /* allocate the memblock's private part. Each DMA memblock
2584 * has space allocated for the items' private usage upon
2585 * the mempool user's request. Each time the mempool grows, it
2586 * allocates a new memblock and its private part at once,
2587 * which helps to minimize memory usage. */
2588 mempool->memblocks_priv_arr[i] =
2589 vzalloc(mempool->items_priv_size * n_items);
2590 if (mempool->memblocks_priv_arr[i] == NULL) {
2591 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2592 goto exit;
2593 }
2594
2595 /* allocate DMA-capable memblock */
2596 mempool->memblocks_arr[i] =
2597 __vxge_hw_blockpool_malloc(mempool->devh,
2598 mempool->memblock_size, dma_object);
2599 if (mempool->memblocks_arr[i] == NULL) {
2600 vfree(mempool->memblocks_priv_arr[i]);
2601 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2602 goto exit;
2603 }
2604
2605 (*num_allocated)++;
2606 mempool->memblocks_allocated++;
2607
2608 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
2609
2610 the_memblock = mempool->memblocks_arr[i];
2611
2612 /* fill the items hash array */
2613 for (j = 0; j < n_items; j++) {
2614 u32 index = i * n_items + j;
2615
2616 if (first_time && index >= mempool->items_initial)
2617 break;
2618
2619 mempool->items_arr[index] =
2620 ((char *)the_memblock + j*mempool->item_size);
2621
2622 /* let the caller do more work on each item */
2623 if (mempool->item_func_alloc != NULL)
2624 mempool->item_func_alloc(mempool, i,
2625 dma_object, index, is_last);
2626
2627 mempool->items_current = index + 1;
2628 }
2629
2630 if (first_time && mempool->items_current ==
2631 mempool->items_initial)
2632 break;
2633 }
2634 exit:
2635 return status;
2636 }
2637
2638 /*
2639 * __vxge_hw_mempool_create
2640 * This function creates a memory pool object. The pool may grow but will
2641 * never shrink. It consists of a number of dynamically allocated blocks
2642 * large enough to hold %items_initial items. The memory is DMA-able but
2643 * the client must map/unmap it before interoperating with the device.
2644 */
2645 static struct vxge_hw_mempool *
2646 __vxge_hw_mempool_create(struct __vxge_hw_device *devh,
2647 u32 memblock_size,
2648 u32 item_size,
2649 u32 items_priv_size,
2650 u32 items_initial,
2651 u32 items_max,
2652 struct vxge_hw_mempool_cbs *mp_callback,
2653 void *userdata)
2654 {
2655 enum vxge_hw_status status = VXGE_HW_OK;
2656 u32 memblocks_to_allocate;
2657 struct vxge_hw_mempool *mempool = NULL;
2658 u32 allocated;
2659
2660 if (memblock_size < item_size) {
2661 status = VXGE_HW_FAIL;
2662 goto exit;
2663 }
2664
2665 mempool = vzalloc(sizeof(struct vxge_hw_mempool));
2666 if (mempool == NULL) {
2667 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2668 goto exit;
2669 }
2670
2671 mempool->devh = devh;
2672 mempool->memblock_size = memblock_size;
2673 mempool->items_max = items_max;
2674 mempool->items_initial = items_initial;
2675 mempool->item_size = item_size;
2676 mempool->items_priv_size = items_priv_size;
2677 mempool->item_func_alloc = mp_callback->item_func_alloc;
2678 mempool->userdata = userdata;
2679
2680 mempool->memblocks_allocated = 0;
2681
2682 mempool->items_per_memblock = memblock_size / item_size;
2683
2684 mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
2685 mempool->items_per_memblock;
2686
2687 /* allocate array of memblocks */
2688 mempool->memblocks_arr =
2689 vzalloc(sizeof(void *) * mempool->memblocks_max);
2690 if (mempool->memblocks_arr == NULL) {
2691 __vxge_hw_mempool_destroy(mempool);
2692 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2693 mempool = NULL;
2694 goto exit;
2695 }
2696
2697 /* allocate array of private parts of items per memblocks */
2698 mempool->memblocks_priv_arr =
2699 vzalloc(sizeof(void *) * mempool->memblocks_max);
2700 if (mempool->memblocks_priv_arr == NULL) {
2701 __vxge_hw_mempool_destroy(mempool);
2702 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2703 mempool = NULL;
2704 goto exit;
2705 }
2706
2707 /* allocate array of memblocks DMA objects */
2708 mempool->memblocks_dma_arr =
2709 vzalloc(sizeof(struct vxge_hw_mempool_dma) *
2710 mempool->memblocks_max);
2711 if (mempool->memblocks_dma_arr == NULL) {
2712 __vxge_hw_mempool_destroy(mempool);
2713 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2714 mempool = NULL;
2715 goto exit;
2716 }
2717
2718 /* allocate hash array of items */
2719 mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
2720 if (mempool->items_arr == NULL) {
2721 __vxge_hw_mempool_destroy(mempool);
2722 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2723 mempool = NULL;
2724 goto exit;
2725 }
2726
2727 /* calculate initial number of memblocks */
2728 memblocks_to_allocate = (mempool->items_initial +
2729 mempool->items_per_memblock - 1) /
2730 mempool->items_per_memblock;
2731
2732 /* pre-allocate the mempool */
2733 status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
2734 &allocated);
2735 if (status != VXGE_HW_OK) {
2736 __vxge_hw_mempool_destroy(mempool);
2737 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2738 mempool = NULL;
2739 goto exit;
2740 }
2741
2742 exit:
2743 return mempool;
2744 }
2745
2746 /*
2747 * __vxge_hw_ring_abort - Terminate the outstanding RxDs
2748 * This function terminates the RxDs of the ring
2749 */
2750 static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
2751 {
2752 void *rxdh;
2753 struct __vxge_hw_channel *channel;
2754
2755 channel = &ring->channel;
2756
2757 for (;;) {
2758 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
2759
2760 if (rxdh == NULL)
2761 break;
2762
2763 vxge_hw_channel_dtr_complete(channel);
2764
2765 if (ring->rxd_term)
2766 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
2767 channel->userdata);
2768
2769 vxge_hw_channel_dtr_free(channel, rxdh);
2770 }
2771
2772 return VXGE_HW_OK;
2773 }
2774
2775 /*
2776 * __vxge_hw_ring_reset - Resets the ring
2777 * This function resets the ring during vpath reset operation
2778 */
2779 static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
2780 {
2781 enum vxge_hw_status status = VXGE_HW_OK;
2782 struct __vxge_hw_channel *channel;
2783
2784 channel = &ring->channel;
2785
2786 __vxge_hw_ring_abort(ring);
2787
2788 status = __vxge_hw_channel_reset(channel);
2789
2790 if (status != VXGE_HW_OK)
2791 goto exit;
2792
2793 if (ring->rxd_init) {
2794 status = vxge_hw_ring_replenish(ring);
2795 if (status != VXGE_HW_OK)
2796 goto exit;
2797 }
2798 exit:
2799 return status;
2800 }
2801
2802 /*
2803 * __vxge_hw_ring_delete - Removes the ring
2804 * This function frees up the memory pool and removes the ring
2805 */
2806 static enum vxge_hw_status
2807 __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
2808 {
2809 struct __vxge_hw_ring *ring = vp->vpath->ringh;
2810
2811 __vxge_hw_ring_abort(ring);
2812
2813 if (ring->mempool)
2814 __vxge_hw_mempool_destroy(ring->mempool);
2815
2816 vp->vpath->ringh = NULL;
2817 __vxge_hw_channel_free(&ring->channel);
2818
2819 return VXGE_HW_OK;
2820 }
2821
2822 /*
2823 * __vxge_hw_ring_create - Create a Ring
2824 * This function creates Ring and initializes it.
2825 */
2826 static enum vxge_hw_status
2827 __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
2828 struct vxge_hw_ring_attr *attr)
2829 {
2830 enum vxge_hw_status status = VXGE_HW_OK;
2831 struct __vxge_hw_ring *ring;
2832 u32 ring_length;
2833 struct vxge_hw_ring_config *config;
2834 struct __vxge_hw_device *hldev;
2835 u32 vp_id;
2836 struct vxge_hw_mempool_cbs ring_mp_callback;
2837
2838 if ((vp == NULL) || (attr == NULL)) {
2839 status = VXGE_HW_FAIL;
2840 goto exit;
2841 }
2842
2843 hldev = vp->vpath->hldev;
2844 vp_id = vp->vpath->vp_id;
2845
2846 config = &hldev->config.vp_config[vp_id].ring;
2847
2848 ring_length = config->ring_blocks *
2849 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
2850
2851 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
2852 VXGE_HW_CHANNEL_TYPE_RING,
2853 ring_length,
2854 attr->per_rxd_space,
2855 attr->userdata);
2856 if (ring == NULL) {
2857 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2858 goto exit;
2859 }
2860
2861 vp->vpath->ringh = ring;
2862 ring->vp_id = vp_id;
2863 ring->vp_reg = vp->vpath->vp_reg;
2864 ring->common_reg = hldev->common_reg;
2865 ring->stats = &vp->vpath->sw_stats->ring_stats;
2866 ring->config = config;
2867 ring->callback = attr->callback;
2868 ring->rxd_init = attr->rxd_init;
2869 ring->rxd_term = attr->rxd_term;
2870 ring->buffer_mode = config->buffer_mode;
2871 ring->rxds_limit = config->rxds_limit;
2872
2873 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
2874 ring->rxd_priv_size =
2875 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
2876 ring->per_rxd_space = attr->per_rxd_space;
2877
2878 ring->rxd_priv_size =
2879 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
2880 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
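/* The rounding above is plain integer round-up. Worked example with
 * hypothetical numbers: for a raw per-RxD private size of 200 bytes and
 * a 64-byte cache line, ((200 + 63) / 64) * 64 = 256, so every RxD's
 * private area starts on a cache-line boundary.
 */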
2881
2882 /* how many RxDs can fit into one block. Depends on configured
2883 * buffer_mode. */
2884 ring->rxds_per_block =
2885 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
2886
2887 /* calculate actual RxD block private size */
2888 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
2889 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
2890 ring->mempool = __vxge_hw_mempool_create(hldev,
2891 VXGE_HW_BLOCK_SIZE,
2892 VXGE_HW_BLOCK_SIZE,
2893 ring->rxdblock_priv_size,
2894 ring->config->ring_blocks,
2895 ring->config->ring_blocks,
2896 &ring_mp_callback,
2897 ring);
2898 if (ring->mempool == NULL) {
2899 __vxge_hw_ring_delete(vp);
2900 return VXGE_HW_ERR_OUT_OF_MEMORY;
2901 }
2902
2903 status = __vxge_hw_channel_initialize(&ring->channel);
2904 if (status != VXGE_HW_OK) {
2905 __vxge_hw_ring_delete(vp);
2906 goto exit;
2907 }
2908
2909 /* Note:
2910 * Specifying rxd_init callback means two things:
2911 * 1) rxds need to be initialized by driver at channel-open time;
2912 * 2) rxds need to be posted at channel-open time
2913 * (that's what the initial_replenish() below does)
2914 * Currently we don't have a case when the 1) is done without the 2).
2915 */
2916 if (ring->rxd_init) {
2917 status = vxge_hw_ring_replenish(ring);
2918 if (status != VXGE_HW_OK) {
2919 __vxge_hw_ring_delete(vp);
2920 goto exit;
2921 }
2922 }
2923
2924 /* initial replenish will increment the counter in its post() routine,
2925 * we have to reset it */
2926 ring->stats->common_stats.usage_cnt = 0;
2927 exit:
2928 return status;
2929 }
2930
2931 /*
2932 * vxge_hw_device_config_default_get - Initialize device config with defaults.
2933 * Initialize Titan device config with default values.
2934 */
2935 enum vxge_hw_status __devinit
2936 vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2937 {
2938 u32 i;
2939
2940 device_config->dma_blockpool_initial =
2941 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
2942 device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
2943 device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
2944 device_config->rth_en = VXGE_HW_RTH_DEFAULT;
2945 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
2946 device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
2947 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
2948
2949 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2950 device_config->vp_config[i].vp_id = i;
2951
2952 device_config->vp_config[i].min_bandwidth =
2953 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
2954
2955 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
2956
2957 device_config->vp_config[i].ring.ring_blocks =
2958 VXGE_HW_DEF_RING_BLOCKS;
2959
2960 device_config->vp_config[i].ring.buffer_mode =
2961 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
2962
2963 device_config->vp_config[i].ring.scatter_mode =
2964 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
2965
2966 device_config->vp_config[i].ring.rxds_limit =
2967 VXGE_HW_DEF_RING_RXDS_LIMIT;
2968
2969 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
2970
2971 device_config->vp_config[i].fifo.fifo_blocks =
2972 VXGE_HW_MIN_FIFO_BLOCKS;
2973
2974 device_config->vp_config[i].fifo.max_frags =
2975 VXGE_HW_MAX_FIFO_FRAGS;
2976
2977 device_config->vp_config[i].fifo.memblock_size =
2978 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
2979
2980 device_config->vp_config[i].fifo.alignment_size =
2981 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
2982
2983 device_config->vp_config[i].fifo.intr =
2984 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
2985
2986 device_config->vp_config[i].fifo.no_snoop_bits =
2987 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
2988 device_config->vp_config[i].tti.intr_enable =
2989 VXGE_HW_TIM_INTR_DEFAULT;
2990
2991 device_config->vp_config[i].tti.btimer_val =
2992 VXGE_HW_USE_FLASH_DEFAULT;
2993
2994 device_config->vp_config[i].tti.timer_ac_en =
2995 VXGE_HW_USE_FLASH_DEFAULT;
2996
2997 device_config->vp_config[i].tti.timer_ci_en =
2998 VXGE_HW_USE_FLASH_DEFAULT;
2999
3000 device_config->vp_config[i].tti.timer_ri_en =
3001 VXGE_HW_USE_FLASH_DEFAULT;
3002
3003 device_config->vp_config[i].tti.rtimer_val =
3004 VXGE_HW_USE_FLASH_DEFAULT;
3005
3006 device_config->vp_config[i].tti.util_sel =
3007 VXGE_HW_USE_FLASH_DEFAULT;
3008
3009 device_config->vp_config[i].tti.ltimer_val =
3010 VXGE_HW_USE_FLASH_DEFAULT;
3011
3012 device_config->vp_config[i].tti.urange_a =
3013 VXGE_HW_USE_FLASH_DEFAULT;
3014
3015 device_config->vp_config[i].tti.uec_a =
3016 VXGE_HW_USE_FLASH_DEFAULT;
3017
3018 device_config->vp_config[i].tti.urange_b =
3019 VXGE_HW_USE_FLASH_DEFAULT;
3020
3021 device_config->vp_config[i].tti.uec_b =
3022 VXGE_HW_USE_FLASH_DEFAULT;
3023
3024 device_config->vp_config[i].tti.urange_c =
3025 VXGE_HW_USE_FLASH_DEFAULT;
3026
3027 device_config->vp_config[i].tti.uec_c =
3028 VXGE_HW_USE_FLASH_DEFAULT;
3029
3030 device_config->vp_config[i].tti.uec_d =
3031 VXGE_HW_USE_FLASH_DEFAULT;
3032
3033 device_config->vp_config[i].rti.intr_enable =
3034 VXGE_HW_TIM_INTR_DEFAULT;
3035
3036 device_config->vp_config[i].rti.btimer_val =
3037 VXGE_HW_USE_FLASH_DEFAULT;
3038
3039 device_config->vp_config[i].rti.timer_ac_en =
3040 VXGE_HW_USE_FLASH_DEFAULT;
3041
3042 device_config->vp_config[i].rti.timer_ci_en =
3043 VXGE_HW_USE_FLASH_DEFAULT;
3044
3045 device_config->vp_config[i].rti.timer_ri_en =
3046 VXGE_HW_USE_FLASH_DEFAULT;
3047
3048 device_config->vp_config[i].rti.rtimer_val =
3049 VXGE_HW_USE_FLASH_DEFAULT;
3050
3051 device_config->vp_config[i].rti.util_sel =
3052 VXGE_HW_USE_FLASH_DEFAULT;
3053
3054 device_config->vp_config[i].rti.ltimer_val =
3055 VXGE_HW_USE_FLASH_DEFAULT;
3056
3057 device_config->vp_config[i].rti.urange_a =
3058 VXGE_HW_USE_FLASH_DEFAULT;
3059
3060 device_config->vp_config[i].rti.uec_a =
3061 VXGE_HW_USE_FLASH_DEFAULT;
3062
3063 device_config->vp_config[i].rti.urange_b =
3064 VXGE_HW_USE_FLASH_DEFAULT;
3065
3066 device_config->vp_config[i].rti.uec_b =
3067 VXGE_HW_USE_FLASH_DEFAULT;
3068
3069 device_config->vp_config[i].rti.urange_c =
3070 VXGE_HW_USE_FLASH_DEFAULT;
3071
3072 device_config->vp_config[i].rti.uec_c =
3073 VXGE_HW_USE_FLASH_DEFAULT;
3074
3075 device_config->vp_config[i].rti.uec_d =
3076 VXGE_HW_USE_FLASH_DEFAULT;
3077
3078 device_config->vp_config[i].mtu =
3079 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
3080
3081 device_config->vp_config[i].rpa_strip_vlan_tag =
3082 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
3083 }
3084
3085 return VXGE_HW_OK;
3086 }
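/*
 * Usage sketch (illustrative only): fetch the defaults, then override
 * selected fields before handing the config to device initialization.
 * VXGE_HW_INTR_MODE_MSIX is assumed to be one of the supported
 * intr_mode values.
 *
 *	struct vxge_hw_device_config config;
 *
 *	status = vxge_hw_device_config_default_get(&config);
 *	if (status == VXGE_HW_OK)
 *		config.intr_mode = VXGE_HW_INTR_MODE_MSIX;
 */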
3087
3088 /*
3089 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
3090 * Set the swapper bits appropriately for the vpath.
3091 */
3092 static enum vxge_hw_status
3093 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
3094 {
3095 #ifndef __BIG_ENDIAN
3096 u64 val64;
3097
3098 val64 = readq(&vpath_reg->vpath_general_cfg1);
3099 wmb();
3100 val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
3101 writeq(val64, &vpath_reg->vpath_general_cfg1);
3102 wmb();
3103 #endif
3104 return VXGE_HW_OK;
3105 }
3106
3107 /*
3108 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
3109 * Set the swapper bits appropriately for the kdfc.
3110 */
3111 static enum vxge_hw_status
3112 __vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
3113 struct vxge_hw_vpath_reg __iomem *vpath_reg)
3114 {
3115 u64 val64;
3116
3117 val64 = readq(&legacy_reg->pifm_wr_swap_en);
3118
3119 if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
3120 val64 = readq(&vpath_reg->kdfcctl_cfg0);
3121 wmb();
3122
3123 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
3124 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
3125 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
3126
3127 writeq(val64, &vpath_reg->kdfcctl_cfg0);
3128 wmb();
3129 }
3130
3131 return VXGE_HW_OK;
3132 }
3133
3134 /*
3135 * vxge_hw_mgmt_reg_read - Read Titan register.
3136 */
3137 enum vxge_hw_status
3138 vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
3139 enum vxge_hw_mgmt_reg_type type,
3140 u32 index, u32 offset, u64 *value)
3141 {
3142 enum vxge_hw_status status = VXGE_HW_OK;
3143
3144 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
3145 status = VXGE_HW_ERR_INVALID_DEVICE;
3146 goto exit;
3147 }
3148
3149 switch (type) {
3150 case vxge_hw_mgmt_reg_type_legacy:
3151 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
3152 status = VXGE_HW_ERR_INVALID_OFFSET;
3153 break;
3154 }
3155 *value = readq((void __iomem *)hldev->legacy_reg + offset);
3156 break;
3157 case vxge_hw_mgmt_reg_type_toc:
3158 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
3159 status = VXGE_HW_ERR_INVALID_OFFSET;
3160 break;
3161 }
3162 *value = readq((void __iomem *)hldev->toc_reg + offset);
3163 break;
3164 case vxge_hw_mgmt_reg_type_common:
3165 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
3166 status = VXGE_HW_ERR_INVALID_OFFSET;
3167 break;
3168 }
3169 *value = readq((void __iomem *)hldev->common_reg + offset);
3170 break;
3171 case vxge_hw_mgmt_reg_type_mrpcim:
3172 if (!(hldev->access_rights &
3173 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
3174 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3175 break;
3176 }
3177 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
3178 status = VXGE_HW_ERR_INVALID_OFFSET;
3179 break;
3180 }
3181 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
3182 break;
3183 case vxge_hw_mgmt_reg_type_srpcim:
3184 if (!(hldev->access_rights &
3185 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
3186 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3187 break;
3188 }
3189 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
3190 status = VXGE_HW_ERR_INVALID_INDEX;
3191 break;
3192 }
3193 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
3194 status = VXGE_HW_ERR_INVALID_OFFSET;
3195 break;
3196 }
3197 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
3198 offset);
3199 break;
3200 case vxge_hw_mgmt_reg_type_vpmgmt:
3201 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
3202 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3203 status = VXGE_HW_ERR_INVALID_INDEX;
3204 break;
3205 }
3206 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
3207 status = VXGE_HW_ERR_INVALID_OFFSET;
3208 break;
3209 }
3210 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
3211 offset);
3212 break;
3213 case vxge_hw_mgmt_reg_type_vpath:
3214 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
3215 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3216 status = VXGE_HW_ERR_INVALID_INDEX;
3217 break;
3218 }
3223 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
3224 status = VXGE_HW_ERR_INVALID_OFFSET;
3225 break;
3226 }
3227 *value = readq((void __iomem *)hldev->vpath_reg[index] +
3228 offset);
3229 break;
3230 default:
3231 status = VXGE_HW_ERR_INVALID_TYPE;
3232 break;
3233 }
3234
3235 exit:
3236 return status;
3237 }
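/*
 * Usage sketch (illustrative only): read the first quadword of the
 * common register space. The index argument is ignored for
 * non-indexed spaces such as legacy, toc and common.
 *
 *	u64 val;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
 *				       0, 0, &val);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */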
3238
3239 /*
3240 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
3241 */
3242 enum vxge_hw_status
3243 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
3244 {
3245 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
3246 enum vxge_hw_status status = VXGE_HW_OK;
3247 int i = 0, j = 0;
3248
3249 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3250 if (!((vpath_mask) & vxge_mBIT(i)))
3251 continue;
3252 vpmgmt_reg = hldev->vpmgmt_reg[i];
3253 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
3254 if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
3255 & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
3256 return VXGE_HW_FAIL;
3257 }
3258 }
3259 return status;
3260 }
3261 /*
3262 * vxge_hw_mgmt_reg_write - Write Titan register.
3263 */
3264 enum vxge_hw_status
3265 vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
3266 enum vxge_hw_mgmt_reg_type type,
3267 u32 index, u32 offset, u64 value)
3268 {
3269 enum vxge_hw_status status = VXGE_HW_OK;
3270
3271 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
3272 status = VXGE_HW_ERR_INVALID_DEVICE;
3273 goto exit;
3274 }
3275
3276 switch (type) {
3277 case vxge_hw_mgmt_reg_type_legacy:
3278 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
3279 status = VXGE_HW_ERR_INVALID_OFFSET;
3280 break;
3281 }
3282 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
3283 break;
3284 case vxge_hw_mgmt_reg_type_toc:
3285 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
3286 status = VXGE_HW_ERR_INVALID_OFFSET;
3287 break;
3288 }
3289 writeq(value, (void __iomem *)hldev->toc_reg + offset);
3290 break;
3291 case vxge_hw_mgmt_reg_type_common:
3292 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
3293 status = VXGE_HW_ERR_INVALID_OFFSET;
3294 break;
3295 }
3296 writeq(value, (void __iomem *)hldev->common_reg + offset);
3297 break;
3298 case vxge_hw_mgmt_reg_type_mrpcim:
3299 if (!(hldev->access_rights &
3300 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
3301 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3302 break;
3303 }
3304 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
3305 status = VXGE_HW_ERR_INVALID_OFFSET;
3306 break;
3307 }
3308 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
3309 break;
3310 case vxge_hw_mgmt_reg_type_srpcim:
3311 if (!(hldev->access_rights &
3312 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
3313 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
3314 break;
3315 }
3316 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
3317 status = VXGE_HW_ERR_INVALID_INDEX;
3318 break;
3319 }
3320 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
3321 status = VXGE_HW_ERR_INVALID_OFFSET;
3322 break;
3323 }
3324 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
3325 offset);
3326
3327 break;
3328 case vxge_hw_mgmt_reg_type_vpmgmt:
3329 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
3330 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3331 status = VXGE_HW_ERR_INVALID_INDEX;
3332 break;
3333 }
3334 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
3335 status = VXGE_HW_ERR_INVALID_OFFSET;
3336 break;
3337 }
3338 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
3339 offset);
3340 break;
3341 case vxge_hw_mgmt_reg_type_vpath:
3342 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
3343 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
3344 status = VXGE_HW_ERR_INVALID_INDEX;
3345 break;
3346 }
3347 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
3348 status = VXGE_HW_ERR_INVALID_OFFSET;
3349 break;
3350 }
3351 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
3352 offset);
3353 break;
3354 default:
3355 status = VXGE_HW_ERR_INVALID_TYPE;
3356 break;
3357 }
3358 exit:
3359 return status;
3360 }
3361
3362 /*
3363 * __vxge_hw_fifo_abort - Terminate the outstanding TxDs
3364 * This function terminates the TxDs of the fifo
3365 */
3366 static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
3367 {
3368 void *txdlh;
3369
3370 for (;;) {
3371 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
3372
3373 if (txdlh == NULL)
3374 break;
3375
3376 vxge_hw_channel_dtr_complete(&fifo->channel);
3377
3378 if (fifo->txdl_term) {
3379 fifo->txdl_term(txdlh,
3380 VXGE_HW_TXDL_STATE_POSTED,
3381 fifo->channel.userdata);
3382 }
3383
3384 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
3385 }
3386
3387 return VXGE_HW_OK;
3388 }
3389
3390 /*
3391 * __vxge_hw_fifo_reset - Resets the fifo
3392 * This function resets the fifo during vpath reset operation
3393 */
3394 static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
3395 {
3396 enum vxge_hw_status status = VXGE_HW_OK;
3397
3398 __vxge_hw_fifo_abort(fifo);
3399 status = __vxge_hw_channel_reset(&fifo->channel);
3400
3401 return status;
3402 }
3403
3404 /*
3405 * __vxge_hw_fifo_delete - Removes the FIFO
3406 * This function frees up the memory pool and removes the FIFO
3407 */
3408 static enum vxge_hw_status
3409 __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
3410 {
3411 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
3412
3413 __vxge_hw_fifo_abort(fifo);
3414
3415 if (fifo->mempool)
3416 __vxge_hw_mempool_destroy(fifo->mempool);
3417
3418 vp->vpath->fifoh = NULL;
3419
3420 __vxge_hw_channel_free(&fifo->channel);
3421
3422 return VXGE_HW_OK;
3423 }
3424
3425 /*
3426 * __vxge_hw_fifo_mempool_item_alloc - Mempool item-alloc callback for
3427 * TxD lists
3428 * This callback is passed to __vxge_hw_mempool_create to format each
3429 * TxD list of the fifo's memory pool
3430 */
3431 static void
3432 __vxge_hw_fifo_mempool_item_alloc(
3433 struct vxge_hw_mempool *mempoolh,
3434 u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
3435 u32 index, u32 is_last)
3436 {
3437 u32 memblock_item_idx;
3438 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
3439 struct vxge_hw_fifo_txd *txdp =
3440 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
3441 struct __vxge_hw_fifo *fifo =
3442 (struct __vxge_hw_fifo *)mempoolh->userdata;
3443 void *memblock = mempoolh->memblocks_arr[memblock_index];
3444
3445 vxge_assert(txdp);
3446
3447 txdp->host_control = (u64) (size_t)
3448 __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
3449 &memblock_item_idx);
3450
3451 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
3452
3453 vxge_assert(txdl_priv);
3454
3455 fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
3456
3457 /* pre-format HW's TxDL's private */
3458 txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
3459 txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
3460 txdl_priv->dma_handle = dma_object->handle;
3461 txdl_priv->memblock = memblock;
3462 txdl_priv->first_txdp = txdp;
3463 txdl_priv->next_txdl_priv = NULL;
3464 txdl_priv->alloc_frags = 0;
3465 }
3466
3467 /*
3468 * __vxge_hw_fifo_create - Create a FIFO
3469 * This function creates FIFO and initializes it.
3470 */
3471 static enum vxge_hw_status
3472 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
3473 struct vxge_hw_fifo_attr *attr)
3474 {
3475 enum vxge_hw_status status = VXGE_HW_OK;
3476 struct __vxge_hw_fifo *fifo;
3477 struct vxge_hw_fifo_config *config;
3478 u32 txdl_size, txdl_per_memblock;
3479 struct vxge_hw_mempool_cbs fifo_mp_callback;
3480 struct __vxge_hw_virtualpath *vpath;
3481
3482 if ((vp == NULL) || (attr == NULL)) {
3483 status = VXGE_HW_ERR_INVALID_HANDLE;
3484 goto exit;
3485 }
3486 vpath = vp->vpath;
3487 config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
3488
3489 txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
3490
3491 txdl_per_memblock = config->memblock_size / txdl_size;
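/* Worked example with hypothetical numbers: with 32-byte TxDs and
 * max_frags = 16, txdl_size = 16 * 32 = 512 bytes, and a 16 KB
 * memblock holds 16384 / 512 = 32 TxDLs. A memblock smaller than one
 * TxDL gives txdl_per_memblock == 0, which is rejected further down
 * with VXGE_HW_ERR_INVALID_BLOCK_SIZE.
 */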
3492
3493 fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
3494 VXGE_HW_CHANNEL_TYPE_FIFO,
3495 config->fifo_blocks * txdl_per_memblock,
3496 attr->per_txdl_space, attr->userdata);
3497
3498 if (fifo == NULL) {
3499 status = VXGE_HW_ERR_OUT_OF_MEMORY;
3500 goto exit;
3501 }
3502
3503 vpath->fifoh = fifo;
3504 fifo->nofl_db = vpath->nofl_db;
3505
3506 fifo->vp_id = vpath->vp_id;
3507 fifo->vp_reg = vpath->vp_reg;
3508 fifo->stats = &vpath->sw_stats->fifo_stats;
3509
3510 fifo->config = config;
3511
3512 /* apply "interrupts per txdl" attribute */
3513 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
3514
3515 if (fifo->config->intr)
3516 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
3517
3518 fifo->no_snoop_bits = config->no_snoop_bits;
3519
3520 /*
3521 * FIFO memory management strategy:
3522 *
3523 * TxDL split into three independent parts:
3524 * - set of TxD's
3525 * - TxD HW private part
3526 * - driver private part
3527 *
3528 * Adaptive memory allocation is used, i.e. memory is allocated on
3529 * demand with a size that fits into one memory block.
3530 * One memory block may contain more than one TxDL.
3531 *
3532 * During "reserve" operations more memory can be allocated on demand
3533 * for example due to FIFO full condition.
3534 *
3535 * The pool of memblocks never shrinks, except in the __vxge_hw_fifo_close
3536 * routine which essentially stops the channel and frees the resources.
3537 */
3538
3539 /* TxDL common private size == TxDL private + driver private */
3540 fifo->priv_size =
3541 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
3542 fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
3543 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
3544
3545 fifo->per_txdl_space = attr->per_txdl_space;
3546
3547 /* TxDL size and per-memblock count as computed from the config above */
3548 fifo->txdl_size = txdl_size;
3549 fifo->txdl_per_memblock = txdl_per_memblock;
3550
3551 fifo->txdl_term = attr->txdl_term;
3552 fifo->callback = attr->callback;
3553
3554 if (fifo->txdl_per_memblock == 0) {
3555 __vxge_hw_fifo_delete(vp);
3556 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
3557 goto exit;
3558 }
3559
3560 fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
3561
3562 fifo->mempool =
3563 __vxge_hw_mempool_create(vpath->hldev,
3564 fifo->config->memblock_size,
3565 fifo->txdl_size,
3566 fifo->priv_size,
3567 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
3568 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
3569 &fifo_mp_callback,
3570 fifo);
3571
3572 if (fifo->mempool == NULL) {
3573 __vxge_hw_fifo_delete(vp);
3574 status = VXGE_HW_ERR_OUT_OF_MEMORY;
3575 goto exit;
3576 }
3577
3578 status = __vxge_hw_channel_initialize(&fifo->channel);
3579 if (status != VXGE_HW_OK) {
3580 __vxge_hw_fifo_delete(vp);
3581 goto exit;
3582 }
3583
3584 vxge_assert(fifo->channel.reserve_ptr);
3585 exit:
3586 return status;
3587 }
3588
3589 /*
3590 * __vxge_hw_vpath_pci_read - Read the content of a given address
3591 * in pci config space.
3592 * Read from the vpath pci config space.
3593 */
3594 static enum vxge_hw_status
3595 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
3596 u32 phy_func_0, u32 offset, u32 *val)
3597 {
3598 u64 val64;
3599 enum vxge_hw_status status = VXGE_HW_OK;
3600 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
3601
3602 val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
3603
3604 if (phy_func_0)
3605 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
3606
3607 writeq(val64, &vp_reg->pci_config_access_cfg1);
3608 wmb();
3609 writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
3610 &vp_reg->pci_config_access_cfg2);
3611 wmb();
3612
3613 status = __vxge_hw_device_register_poll(
3614 &vp_reg->pci_config_access_cfg2,
3615 VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3616
3617 if (status != VXGE_HW_OK)
3618 goto exit;
3619
3620 val64 = readq(&vp_reg->pci_config_access_status);
3621
3622 if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
3623 status = VXGE_HW_FAIL;
3624 *val = 0;
3625 } else
3626 *val = (u32)vxge_bVALn(val64, 32, 32);
3627 exit:
3628 return status;
3629 }
3630
3631 /**
3632 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
3633 * @hldev: HW device.
3634 * @on_off: TRUE to turn flickering on, FALSE to turn it off
3635 *
3636 * Flicker the link LED.
3637 */
3638 enum vxge_hw_status
3639 vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
3640 {
3641 struct __vxge_hw_virtualpath *vpath;
3642 u64 data0, data1 = 0, steer_ctrl = 0;
3643 enum vxge_hw_status status;
3644
3645 if (hldev == NULL) {
3646 status = VXGE_HW_ERR_INVALID_DEVICE;
3647 goto exit;
3648 }
3649
3650 vpath = &hldev->virtual_paths[hldev->first_vp_id];
3651
3652 data0 = on_off;
3653 status = vxge_hw_vpath_fw_api(vpath,
3654 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
3655 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
3656 0, &data0, &data1, &steer_ctrl);
3657 exit:
3658 return status;
3659 }
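/*
 * Usage sketch (illustrative only), e.g. from an ethtool identify
 * handler: pass a non-zero on_off to start flickering, zero to stop.
 * Error handling is omitted for brevity.
 *
 *	vxge_hw_device_flick_link_led(hldev, 1);	(blink on)
 *	...
 *	vxge_hw_device_flick_link_led(hldev, 0);	(blink off)
 */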
3660
3661 /*
3662 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3663 */
3664 enum vxge_hw_status
3665 __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3666 u32 action, u32 rts_table, u32 offset,
3667 u64 *data0, u64 *data1)
3668 {
3669 enum vxge_hw_status status;
3670 u64 steer_ctrl = 0;
3671
3672 if (vp == NULL) {
3673 status = VXGE_HW_ERR_INVALID_HANDLE;
3674 goto exit;
3675 }
3676
3677 if ((rts_table ==
3678 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3679 (rts_table ==
3680 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3681 (rts_table ==
3682 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3683 (rts_table ==
3684 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3685 steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3686 }
3687
3688 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3689 data0, data1, &steer_ctrl);
3690 if (status != VXGE_HW_OK)
3691 goto exit;
3692
3693 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3694 (rts_table !=
3695 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3696 *data1 = 0;
3697 exit:
3698 return status;
3699 }
3700
3701 /*
3702 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3703 */
3704 enum vxge_hw_status
3705 __vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
3706 u32 rts_table, u32 offset, u64 steer_data0,
3707 u64 steer_data1)
3708 {
3709 u64 data0, data1 = 0, steer_ctrl = 0;
3710 enum vxge_hw_status status;
3711
3712 if (vp == NULL) {
3713 status = VXGE_HW_ERR_INVALID_HANDLE;
3714 goto exit;
3715 }
3716
3717 data0 = steer_data0;
3718
3719 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3720 (rts_table ==
3721 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3722 data1 = steer_data1;
3723
3724 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3725 &data0, &data1, &steer_ctrl);
3726 exit:
3727 return status;
3728 }
3729
3730 /*
3731 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
3732 */
3733 enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3734 struct __vxge_hw_vpath_handle *vp,
3735 enum vxge_hw_rth_algoritms algorithm,
3736 struct vxge_hw_rth_hash_types *hash_type,
3737 u16 bucket_size)
3738 {
3739 u64 data0, data1;
3740 enum vxge_hw_status status = VXGE_HW_OK;
3741
3742 if (vp == NULL) {
3743 status = VXGE_HW_ERR_INVALID_HANDLE;
3744 goto exit;
3745 }
3746
3747 status = __vxge_hw_vpath_rts_table_get(vp,
3748 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3749 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3750 0, &data0, &data1);
3751 if (status != VXGE_HW_OK)
3752 goto exit;
3753
3754 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3755 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3756
3757 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3758 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3759 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3760
3761 if (hash_type->hash_type_tcpipv4_en)
3762 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3763
3764 if (hash_type->hash_type_ipv4_en)
3765 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3766
3767 if (hash_type->hash_type_tcpipv6_en)
3768 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3769
3770 if (hash_type->hash_type_ipv6_en)
3771 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3772
3773 if (hash_type->hash_type_tcpipv6ex_en)
3774 data0 |=
3775 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3776
3777 if (hash_type->hash_type_ipv6ex_en)
3778 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3779
3780 if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3781 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3782 else
3783 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3784
3785 status = __vxge_hw_vpath_rts_table_set(vp,
3786 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3787 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3788 0, data0, 0);
3789 exit:
3790 return status;
3791 }
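
/*
 * Usage sketch (illustrative only, not driver code): enable RTH with the
 * Jenkins hash on TCP/IPv4 flows, spread over 2^8 buckets. "handle" is
 * assumed to be a handle returned by vxge_hw_vpath_open(), and
 * RTH_ALG_JENKINS a value of enum vxge_hw_rth_algoritms.
 *
 *	struct vxge_hw_rth_hash_types hash = {0};
 *	enum vxge_hw_status status;
 *
 *	hash.hash_type_tcpipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(handle, RTH_ALG_JENKINS, &hash, 8);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */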
3792
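/*
 * vxge_hw_rts_rth_data0_data1_get - Pack one indirection table entry
 * Packs entry j of the indirection table into the ITEM0/ITEM1 slot of
 * data0 or data1 selected by flag (1..4). Each call fills exactly one of
 * the four entries carried by a single RTH_MULTI_IT steering write.
 */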
3793 static void
3794 vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3795 u16 flag, u8 *itable)
3796 {
3797 switch (flag) {
3798 case 1:
3799 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3800 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3801 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3802 itable[j]);
break;
3803 case 2:
3804 *data0 |=
3805 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3806 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3807 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3808 itable[j]);
break;
3809 case 3:
3810 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3811 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3812 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3813 itable[j]);
break;
3814 case 4:
3815 *data1 |=
3816 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3817 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3818 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3819 itable[j]);
break;
3820 default:
3821 return;
3822 }
3823 }

3824 /*
3825 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3826 */
3827 enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3828 struct __vxge_hw_vpath_handle **vpath_handles,
3829 u32 vpath_count,
3830 u8 *mtable,
3831 u8 *itable,
3832 u32 itable_size)
3833 {
3834 u32 i, j, action, rts_table;
3835 u64 data0;
3836 u64 data1;
3837 u32 max_entries;
3838 enum vxge_hw_status status = VXGE_HW_OK;
3839 struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3840
3841 if (vp == NULL) {
3842 status = VXGE_HW_ERR_INVALID_HANDLE;
3843 goto exit;
3844 }
3845
3846 max_entries = (((u32)1) << itable_size);
3847
3848 if (vp->vpath->hldev->config.rth_it_type
3849 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3850 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3851 rts_table =
3852 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3853
3854 for (j = 0; j < max_entries; j++) {
3855
3856 data1 = 0;
3857
3858 data0 =
3859 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3860 itable[j]);
3861
3862 status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3863 action, rts_table, j, data0, data1);
3864
3865 if (status != VXGE_HW_OK)
3866 goto exit;
3867 }
3868
3869 for (j = 0; j < max_entries; j++) {
3870
3871 data1 = 0;
3872
3873 data0 =
3874 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3875 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3876 itable[j]);
3877
3878 status = __vxge_hw_vpath_rts_table_set(
3879 vpath_handles[mtable[itable[j]]], action,
3880 rts_table, j, data0, data1);
3881
3882 if (status != VXGE_HW_OK)
3883 goto exit;
3884 }
3885 } else {
3886 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3887 rts_table =
3888 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3889 for (i = 0; i < vpath_count; i++) {
3890
3891 for (j = 0; j < max_entries;) {
3892
3893 data0 = 0;
3894 data1 = 0;
3895
3896 while (j < max_entries) {
3897 if (mtable[itable[j]] != i) {
3898 j++;
3899 continue;
3900 }
3901 vxge_hw_rts_rth_data0_data1_get(j,
3902 &data0, &data1, 1, itable);
3903 j++;
3904 break;
3905 }
3906
3907 while (j < max_entries) {
3908 if (mtable[itable[j]] != i) {
3909 j++;
3910 continue;
3911 }
3912 vxge_hw_rts_rth_data0_data1_get(j,
3913 &data0, &data1, 2, itable);
3914 j++;
3915 break;
3916 }
3917
3918 while (j < max_entries) {
3919 if (mtable[itable[j]] != i) {
3920 j++;
3921 continue;
3922 }
3923 vxge_hw_rts_rth_data0_data1_get(j,
3924 &data0, &data1, 3, itable);
3925 j++;
3926 break;
3927 }
3928
3929 while (j < max_entries) {
3930 if (mtable[itable[j]] != i) {
3931 j++;
3932 continue;
3933 }
3934 vxge_hw_rts_rth_data0_data1_get(j,
3935 &data0, &data1, 4, itable);
3936 j++;
3937 break;
3938 }
3939
3940 if (data0 != 0) {
3941 status = __vxge_hw_vpath_rts_table_set(
3942 vpath_handles[i],
3943 action, rts_table,
3944 0, data0, data1);
3945
3946 if (status != VXGE_HW_OK)
3947 goto exit;
3948 }
3949 }
3950 }
3951 }
3952 exit:
3953 return status;
3954 }
3955
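/*
 * Illustrative example (values chosen for this note only): with two open
 * vpaths and itable_size = 2, max_entries is 4. Given
 *
 *	itable[] = { 0, 1, 0, 1 };	(bucket -> vpath number)
 *	mtable[0] = 0; mtable[1] = 1;	(vpath number -> vpath_handles index)
 *
 * the SOLO_IT branch programs buckets 0 and 2 through vpath_handles[0] and
 * buckets 1 and 3 through vpath_handles[1], while the MULTI_IT branch packs
 * up to four buckets per steering call via vxge_hw_rts_rth_data0_data1_get().
 */
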
3956 /**
3957 * vxge_hw_vpath_check_leak - Check for memory leak
3958 * @ring: Handle to the ring object used for receive
3959 *
3960 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
3961 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3962 * Returns: VXGE_HW_FAIL if a leak has occurred, VXGE_HW_OK otherwise.
3963 *
3964 */
3965 enum vxge_hw_status
3966 vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3967 {
3968 enum vxge_hw_status status = VXGE_HW_OK;
3969 u64 rxd_new_count, rxd_spat;
3970
3971 if (ring == NULL)
3972 return status;
3973
3974 rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3975 rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3976 rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3977
3978 if (rxd_new_count >= rxd_spat)
3979 status = VXGE_HW_FAIL;
3980
3981 return status;
3982 }
3983
3984 /*
3985 * __vxge_hw_vpath_mgmt_read
3986 * This routine reads the vpath_mgmt registers
3987 */
3988 static enum vxge_hw_status
3989 __vxge_hw_vpath_mgmt_read(
3990 struct __vxge_hw_device *hldev,
3991 struct __vxge_hw_virtualpath *vpath)
3992 {
3993 u32 i, mtu = 0, max_pyld = 0;
3994 u64 val64;
3995 enum vxge_hw_status status = VXGE_HW_OK;
3996
3997 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3998
3999 val64 = readq(&vpath->vpmgmt_reg->
4000 rxmac_cfg0_port_vpmgmt_clone[i]);
4001 max_pyld =
4002 (u32)
4003 VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
4004 (val64);
4005 if (mtu < max_pyld)
4006 mtu = max_pyld;
4007 }
4008
4009 vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
4010
4011 val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
4012
4013 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4014 if (val64 & vxge_mBIT(i))
4015 vpath->vsport_number = i;
4016 }
4017
4018 val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
4019
4020 if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
4021 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
4022 else
4023 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
4024
4025 return status;
4026 }
4027
4028 /*
4029 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
4030 * This routine checks the vpath_rst_in_prog register to see if
4031 * the adapter has completed the reset process for the vpath
4032 */
4033 static enum vxge_hw_status
4034 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
4035 {
4036 enum vxge_hw_status status;
4037
4038 status = __vxge_hw_device_register_poll(
4039 &vpath->hldev->common_reg->vpath_rst_in_prog,
4040 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
4041 1 << (16 - vpath->vp_id)),
4042 vpath->hldev->config.device_poll_millis);
4043
4044 return status;
4045 }
4046
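/*
 * Note on the shifts in __vxge_hw_vpath_reset_check() above and in
 * __vxge_hw_vpath_reset() below: the per-vpath bits of these registers
 * occupy a 17-bit field that appears to be numbered from the most
 * significant end, so vpath 0 maps to (1 << 16) and vpath 16 to (1 << 0).
 */
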
4047 /*
4048 * __vxge_hw_vpath_reset
4049 * This routine resets the vpath on the device
4050 */
4051 static enum vxge_hw_status
4052 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4053 {
4054 u64 val64;
4055 enum vxge_hw_status status = VXGE_HW_OK;
4056
4057 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
4058
4059 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4060 &hldev->common_reg->cmn_rsthdlr_cfg0);
4061
4062 return status;
4063 }
4064
4065 /*
4066 * __vxge_hw_vpath_sw_reset
4067 * This routine resets the vpath structures
4068 */
4069 static enum vxge_hw_status
4070 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
4071 {
4072 enum vxge_hw_status status = VXGE_HW_OK;
4073 struct __vxge_hw_virtualpath *vpath;
4074
4075 vpath = &hldev->virtual_paths[vp_id];
4076
4077 if (vpath->ringh) {
4078 status = __vxge_hw_ring_reset(vpath->ringh);
4079 if (status != VXGE_HW_OK)
4080 goto exit;
4081 }
4082
4083 if (vpath->fifoh)
4084 status = __vxge_hw_fifo_reset(vpath->fifoh);
4085 exit:
4086 return status;
4087 }
4088
4089 /*
4090 * __vxge_hw_vpath_prc_configure
4091 * This routine configures the prc registers of the virtual path using the
4092 * config passed
4093 */
4094 static void
4095 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4096 {
4097 u64 val64;
4098 struct __vxge_hw_virtualpath *vpath;
4099 struct vxge_hw_vp_config *vp_config;
4100 struct vxge_hw_vpath_reg __iomem *vp_reg;
4101
4102 vpath = &hldev->virtual_paths[vp_id];
4103 vp_reg = vpath->vp_reg;
4104 vp_config = vpath->vp_config;
4105
4106 if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
4107 return;
4108
4109 val64 = readq(&vp_reg->prc_cfg1);
4110 val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
4111 writeq(val64, &vp_reg->prc_cfg1);
4112
4113 val64 = readq(&vpath->vp_reg->prc_cfg6);
4114 val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
4115 writeq(val64, &vpath->vp_reg->prc_cfg6);
4116
4117 val64 = readq(&vp_reg->prc_cfg7);
4118
4119 if (vpath->vp_config->ring.scatter_mode !=
4120 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
4121
4122 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
4123
4124 switch (vpath->vp_config->ring.scatter_mode) {
4125 case VXGE_HW_RING_SCATTER_MODE_A:
4126 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4127 VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
4128 break;
4129 case VXGE_HW_RING_SCATTER_MODE_B:
4130 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4131 VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
4132 break;
4133 case VXGE_HW_RING_SCATTER_MODE_C:
4134 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
4135 VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
4136 break;
4137 }
4138 }
4139
4140 writeq(val64, &vp_reg->prc_cfg7);
4141
4142 writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
4143 __vxge_hw_ring_first_block_address_get(
4144 vpath->ringh) >> 3), &vp_reg->prc_cfg5);
4145
4146 val64 = readq(&vp_reg->prc_cfg4);
4147 val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
4148 val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
4149
4150 val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
4151 VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
4152
4153 if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
4154 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
4155 else
4156 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
4157
4158 writeq(val64, &vp_reg->prc_cfg4);
4159 }
4160
4161 /*
4162 * __vxge_hw_vpath_kdfc_configure
4163 * This routine configures the kdfc registers of the virtual path using the
4164 * config passed
4165 */
4166 static enum vxge_hw_status
4167 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4168 {
4169 u64 val64;
4170 u64 vpath_stride;
4171 enum vxge_hw_status status = VXGE_HW_OK;
4172 struct __vxge_hw_virtualpath *vpath;
4173 struct vxge_hw_vpath_reg __iomem *vp_reg;
4174
4175 vpath = &hldev->virtual_paths[vp_id];
4176 vp_reg = vpath->vp_reg;
4177 status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
4178
4179 if (status != VXGE_HW_OK)
4180 goto exit;
4181
4182 val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
4183
4184 vpath->max_kdfc_db =
4185 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
4186 val64+1)/2;
4187
4188 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4189
4190 vpath->max_nofl_db = vpath->max_kdfc_db;
4191
4192 if (vpath->max_nofl_db <
4193 ((vpath->vp_config->fifo.memblock_size /
4194 (vpath->vp_config->fifo.max_frags *
4195 sizeof(struct vxge_hw_fifo_txd))) *
4196 vpath->vp_config->fifo.fifo_blocks)) {
4197
4198 return VXGE_HW_BADCFG_FIFO_BLOCKS;
4199 }
4200 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
4201 (vpath->max_nofl_db*2)-1);
4202 }
4203
4204 writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
4205
4206 writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
4207 &vp_reg->kdfc_fifo_trpl_ctrl);
4208
4209 val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
4210
4211 val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
4212 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
4213
4214 val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
4215 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
4216 #ifndef __BIG_ENDIAN
4217 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
4218 #endif
4219 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
4220
4221 writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
4222 writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
4223 wmb();
4224 vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
4225
4226 vpath->nofl_db =
4227 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
4228 (hldev->kdfc + (vp_id *
4229 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
4230 vpath_stride)));
4231 exit:
4232 return status;
4233 }
4234
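/*
 * Worked example for the fifo_blocks check above (numbers are illustrative;
 * sizeof(struct vxge_hw_fifo_txd) is assumed to be 32 bytes): with
 * memblock_size = 8192, max_frags = 64 and fifo_blocks = 8, the fifo needs
 * 8192 / (64 * 32) * 8 = 32 non-offload doorbells, so a max_nofl_db below
 * 32 fails with VXGE_HW_BADCFG_FIFO_BLOCKS.
 */
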
4235 /*
4236 * __vxge_hw_vpath_mac_configure
4237 * This routine configures the mac of the virtual path using the config passed
4238 */
4239 static enum vxge_hw_status
4240 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4241 {
4242 u64 val64;
4243 enum vxge_hw_status status = VXGE_HW_OK;
4244 struct __vxge_hw_virtualpath *vpath;
4245 struct vxge_hw_vp_config *vp_config;
4246 struct vxge_hw_vpath_reg __iomem *vp_reg;
4247
4248 vpath = &hldev->virtual_paths[vp_id];
4249 vp_reg = vpath->vp_reg;
4250 vp_config = vpath->vp_config;
4251
4252 writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
4253 vpath->vsport_number), &vp_reg->xmac_vsport_choice);
4254
4255 if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4256
4257 val64 = readq(&vp_reg->xmac_rpa_vcfg);
4258
4259 if (vp_config->rpa_strip_vlan_tag !=
4260 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
4261 if (vp_config->rpa_strip_vlan_tag)
4262 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
4263 else
4264 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
4265 }
4266
4267 writeq(val64, &vp_reg->xmac_rpa_vcfg);
4268 val64 = readq(&vp_reg->rxmac_vcfg0);
4269
4270 if (vp_config->mtu !=
4271 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
4272 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4273 if ((vp_config->mtu +
4274 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
4275 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
4276 vp_config->mtu +
4277 VXGE_HW_MAC_HEADER_MAX_SIZE);
4278 else
4279 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
4280 vpath->max_mtu);
4281 }
4282
4283 writeq(val64, &vp_reg->rxmac_vcfg0);
4284
4285 val64 = readq(&vp_reg->rxmac_vcfg1);
4286
4287 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
4288 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
4289
4290 if (hldev->config.rth_it_type ==
4291 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
4292 val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
4293 0x2) |
4294 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
4295 }
4296
4297 writeq(val64, &vp_reg->rxmac_vcfg1);
4298 }
4299 return status;
4300 }
4301
4302 /*
4303 * __vxge_hw_vpath_tim_configure
4304 * This routine configures the tim registers of the virtual path using the
4305 * config passed
4306 */
4307 static enum vxge_hw_status
4308 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4309 {
4310 u64 val64;
4311 enum vxge_hw_status status = VXGE_HW_OK;
4312 struct __vxge_hw_virtualpath *vpath;
4313 struct vxge_hw_vpath_reg __iomem *vp_reg;
4314 struct vxge_hw_vp_config *config;
4315
4316 vpath = &hldev->virtual_paths[vp_id];
4317 vp_reg = vpath->vp_reg;
4318 config = vpath->vp_config;
4319
4320 writeq(0, &vp_reg->tim_dest_addr);
4321 writeq(0, &vp_reg->tim_vpath_map);
4322 writeq(0, &vp_reg->tim_bitmap);
4323 writeq(0, &vp_reg->tim_remap);
4324
4325 if (config->ring.enable == VXGE_HW_RING_ENABLE)
4326 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
4327 (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4328 VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
4329
4330 val64 = readq(&vp_reg->tim_pci_cfg);
4331 val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
4332 writeq(val64, &vp_reg->tim_pci_cfg);
4333
4334 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4335
4336 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4337
4338 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4339 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4340 0x3ffffff);
4341 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4342 config->tti.btimer_val);
4343 }
4344
4345 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
4346
4347 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
4348 if (config->tti.timer_ac_en)
4349 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4350 else
4351 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4352 }
4353
4354 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
4355 if (config->tti.timer_ci_en)
4356 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4357 else
4358 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4359 }
4360
4361 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
4362 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
4363 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
4364 config->tti.urange_a);
4365 }
4366
4367 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
4368 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
4369 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
4370 config->tti.urange_b);
4371 }
4372
4373 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
4374 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
4375 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
4376 config->tti.urange_c);
4377 }
4378
4379 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4380 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
4381
4382 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4383 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4384 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4385 config->tti.uec_a);
4386 }
4387
4388 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4389 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4390 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4391 config->tti.uec_b);
4392 }
4393
4394 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4395 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4396 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4397 config->tti.uec_c);
4398 }
4399
4400 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4401 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4402 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4403 config->tti.uec_d);
4404 }
4405
4406 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
4407 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
4408
4409 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4410 if (config->tti.timer_ri_en)
4411 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4412 else
4413 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4414 }
4415
4416 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4417 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4418 0x3ffffff);
4419 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4420 config->tti.rtimer_val);
4421 }
4422
4423 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4424 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4425 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
4426 }
4427
4428 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4429 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4430 0x3ffffff);
4431 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4432 config->tti.ltimer_val);
4433 }
4434
4435 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
4436 }
4437
4438 if (config->ring.enable == VXGE_HW_RING_ENABLE) {
4439
4440 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4441
4442 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4443 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4444 0x3ffffff);
4445 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
4446 config->rti.btimer_val);
4447 }
4448
4449 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
4450
4451 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
4452 if (config->rti.timer_ac_en)
4453 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4454 else
4455 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
4456 }
4457
4458 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
4459 if (config->rti.timer_ci_en)
4460 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4461 else
4462 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4463 }
4464
4465 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
4466 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
4467 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
4468 config->rti.urange_a);
4469 }
4470
4471 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
4472 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
4473 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
4474 config->rti.urange_b);
4475 }
4476
4477 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
4478 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
4479 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
4480 config->rti.urange_c);
4481 }
4482
4483 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4484 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4485
4486 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4487 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4488 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4489 config->rti.uec_a);
4490 }
4491
4492 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4493 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4494 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4495 config->rti.uec_b);
4496 }
4497
4498 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4499 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4500 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4501 config->rti.uec_c);
4502 }
4503
4504 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4505 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4506 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4507 config->rti.uec_d);
4508 }
4509
4510 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4511 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4512
4513 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4514 if (config->rti.timer_ri_en)
4515 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4516 else
4517 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4518 }
4519
4520 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4521 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4522 0x3ffffff);
4523 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4524 config->rti.rtimer_val);
4525 }
4526
4527 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4528 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4529 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
4530 }
4531
4532 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4533 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4534 0x3ffffff);
4535 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4536 config->rti.ltimer_val);
4537 }
4538
4539 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4540 }
4541
4542 val64 = 0;
4543 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4544 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4545 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4546 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4547 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4548 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4549
4550 val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
4551 val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
4552 val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
4553 writeq(val64, &vp_reg->tim_wrkld_clc);
4554
4555 return status;
4556 }
4557
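/*
 * Every field update in __vxge_hw_vpath_tim_configure() above follows one
 * read-modify-write shape: leave the field alone while the config still
 * holds VXGE_HW_USE_FLASH_DEFAULT, otherwise clear the field's mask and OR
 * in the new value. Sketch for a hypothetical 6-bit field FOO (macro and
 * config member invented for illustration):
 *
 *	if (config->tti.foo != VXGE_HW_USE_FLASH_DEFAULT) {
 *		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_FOO(0x3f);
 *		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_FOO(config->tti.foo);
 *	}
 */
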
4558 void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4559 {
4560 struct __vxge_hw_virtualpath *vpath;
4561 struct vxge_hw_vpath_reg __iomem *vp_reg;
4562 struct vxge_hw_vp_config *config;
4563 u64 val64;
4564
4565 vpath = &hldev->virtual_paths[vp_id];
4566 vp_reg = vpath->vp_reg;
4567 config = vpath->vp_config;
4568
4569 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
4570 config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
4571 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4572 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4573 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4574 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4575 }
4576 }
4577
4578 /*
4579 * __vxge_hw_vpath_initialize
4580 * This routine is the final phase of init which initializes the
4581 * registers of the vpath using the configuration passed.
4582 */
4583 static enum vxge_hw_status
4584 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4585 {
4586 u64 val64;
4587 u32 val32;
4588 enum vxge_hw_status status = VXGE_HW_OK;
4589 struct __vxge_hw_virtualpath *vpath;
4590 struct vxge_hw_vpath_reg __iomem *vp_reg;
4591
4592 vpath = &hldev->virtual_paths[vp_id];
4593
4594 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4595 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4596 goto exit;
4597 }
4598 vp_reg = vpath->vp_reg;
4599
4600 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4601 if (status != VXGE_HW_OK)
4602 goto exit;
4603
4604 status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
4605 if (status != VXGE_HW_OK)
4606 goto exit;
4607
4608 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4609 if (status != VXGE_HW_OK)
4610 goto exit;
4611
4612 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4613 if (status != VXGE_HW_OK)
4614 goto exit;
4615
4616 val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4617
4618 /* Get MRRS value from device control */
4619 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4620 if (status == VXGE_HW_OK) {
4621 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4622 val64 &=
4623 ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4624 val64 |=
4625 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4626
4627 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4628 }
4629
4630 val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4631 val64 |=
4632 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4633 VXGE_HW_MAX_PAYLOAD_SIZE_512);
4634
4635 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4636 writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4637
4638 exit:
4639 return status;
4640 }
4641
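/*
 * Note: offset 0x78 read in __vxge_hw_vpath_initialize() above is assumed
 * to be the PCIe Device Control register of this adapter's config space;
 * bits 14:12 hold the Max_Read_Request_Size encoding (0 = 128B up to
 * 5 = 4096B), which is reused above as the read DMA fill threshold.
 */
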
4642 /*
4643 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4644 * This routine closes all channels it opened and frees up memory
4645 */
4646 static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4647 {
4648 struct __vxge_hw_virtualpath *vpath;
4649
4650 vpath = &hldev->virtual_paths[vp_id];
4651
4652 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4653 goto exit;
4654
4655 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4656 vpath->hldev->tim_int_mask1, vpath->vp_id);
4657 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4658
4659 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4660 exit:
4661 return;
4662 }
4663
4664 /*
4665 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4666 * This routine is the initial phase of init which resets the vpath and
4667 * initializes the software support structures.
4668 */
4669 static enum vxge_hw_status
4670 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4671 struct vxge_hw_vp_config *config)
4672 {
4673 struct __vxge_hw_virtualpath *vpath;
4674 enum vxge_hw_status status = VXGE_HW_OK;
4675
4676 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4677 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4678 goto exit;
4679 }
4680
4681 vpath = &hldev->virtual_paths[vp_id];
4682
4683 spin_lock_init(&hldev->virtual_paths[vp_id].lock);
4684 vpath->vp_id = vp_id;
4685 vpath->vp_open = VXGE_HW_VP_OPEN;
4686 vpath->hldev = hldev;
4687 vpath->vp_config = config;
4688 vpath->vp_reg = hldev->vpath_reg[vp_id];
4689 vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4690
4691 __vxge_hw_vpath_reset(hldev, vp_id);
4692
4693 status = __vxge_hw_vpath_reset_check(vpath);
4694 if (status != VXGE_HW_OK) {
4695 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4696 goto exit;
4697 }
4698
4699 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4700 if (status != VXGE_HW_OK) {
4701 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4702 goto exit;
4703 }
4704
4705 INIT_LIST_HEAD(&vpath->vpath_handles);
4706
4707 vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4708
4709 VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4710 hldev->tim_int_mask1, vp_id);
4711
4712 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4713 if (status != VXGE_HW_OK)
4714 __vxge_hw_vp_terminate(hldev, vp_id);
4715 exit:
4716 return status;
4717 }
4718
4719 /*
4720 * vxge_hw_vpath_mtu_set - Set MTU.
4721 * Set new MTU value. For example, to use jumbo frames:
4722 * vxge_hw_vpath_mtu_set(my_device, 9600);
4723 */
4724 enum vxge_hw_status
4725 vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4726 {
4727 u64 val64;
4728 enum vxge_hw_status status = VXGE_HW_OK;
4729 struct __vxge_hw_virtualpath *vpath;
4730
4731 if (vp == NULL) {
4732 status = VXGE_HW_ERR_INVALID_HANDLE;
4733 goto exit;
4734 }
4735 vpath = vp->vpath;
4736
4737 new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4738
4739 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
4740 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
goto exit;
}

4742 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4743
4744 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4745 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4746
4747 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4748
4749 vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4750
4751 exit:
4752 return status;
4753 }
4754
4755 /*
4756 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4757 * Enable the DMA vpath statistics. The function is to be called to re-enable
4758 * the adapter to update stats in host memory
4759 */
4760 static enum vxge_hw_status
4761 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4762 {
4763 enum vxge_hw_status status = VXGE_HW_OK;
4764 struct __vxge_hw_virtualpath *vpath;
4765
4766 vpath = vp->vpath;
4767
4768 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4769 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4770 goto exit;
4771 }
4772
4773 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4774 sizeof(struct vxge_hw_vpath_stats_hw_info));
4775
4776 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4777 exit:
4778 return status;
4779 }
4780
4781 /*
4782 * __vxge_hw_blockpool_block_allocate - Allocates a block from the block pool
4783 * This function allocates a block from the block pool or from the system
4784 */
4785 static struct __vxge_hw_blockpool_entry *
4786 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
4787 {
4788 struct __vxge_hw_blockpool_entry *entry = NULL;
4789 struct __vxge_hw_blockpool *blockpool;
4790
4791 blockpool = &devh->block_pool;
4792
4793 if (size == blockpool->block_size) {
4794
4795 if (!list_empty(&blockpool->free_block_list))
4796 entry = (struct __vxge_hw_blockpool_entry *)
4797 list_first_entry(&blockpool->free_block_list,
4798 struct __vxge_hw_blockpool_entry,
4799 item);
4800
4801 if (entry != NULL) {
4802 list_del(&entry->item);
4803 blockpool->pool_size--;
4804 }
4805 }
4806
4807 if (entry != NULL)
4808 __vxge_hw_blockpool_blocks_add(blockpool);
4809
4810 return entry;
4811 }
4812
4813 /*
4814 * vxge_hw_vpath_open - Open a virtual path on a given adapter
4815 * This function is used to open access to a virtual path of an
4816 * adapter for offload and GRO operations. It returns
4817 * synchronously.
4818 */
4819 enum vxge_hw_status
4820 vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4821 struct vxge_hw_vpath_attr *attr,
4822 struct __vxge_hw_vpath_handle **vpath_handle)
4823 {
4824 struct __vxge_hw_virtualpath *vpath;
4825 struct __vxge_hw_vpath_handle *vp;
4826 enum vxge_hw_status status;
4827
4828 vpath = &hldev->virtual_paths[attr->vp_id];
4829
4830 if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4831 status = VXGE_HW_ERR_INVALID_STATE;
4832 goto vpath_open_exit1;
4833 }
4834
4835 status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4836 &hldev->config.vp_config[attr->vp_id]);
4837 if (status != VXGE_HW_OK)
4838 goto vpath_open_exit1;
4839
4840 vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
4841 if (vp == NULL) {
4842 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4843 goto vpath_open_exit2;
4844 }
4845
4846 vp->vpath = vpath;
4847
4848 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4849 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4850 if (status != VXGE_HW_OK)
4851 goto vpath_open_exit6;
4852 }
4853
4854 if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4855 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4856 if (status != VXGE_HW_OK)
4857 goto vpath_open_exit7;
4858
4859 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4860 }
4861
4862 vpath->fifoh->tx_intr_num =
4863 (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4864 VXGE_HW_VPATH_INTR_TX;
4865
4866 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4867 VXGE_HW_BLOCK_SIZE);
4868 if (vpath->stats_block == NULL) {
4869 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4870 goto vpath_open_exit8;
4871 }
4872
4873 vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
4874 stats_block->memblock;
4875 memset(vpath->hw_stats, 0,
4876 sizeof(struct vxge_hw_vpath_stats_hw_info));
4877
4878 hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4879 vpath->hw_stats;
4880
4881 vpath->hw_stats_sav =
4882 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4883 memset(vpath->hw_stats_sav, 0,
4884 sizeof(struct vxge_hw_vpath_stats_hw_info));
4885
4886 writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4887
4888 status = vxge_hw_vpath_stats_enable(vp);
4889 if (status != VXGE_HW_OK)
4890 goto vpath_open_exit8;
4891
4892 list_add(&vp->item, &vpath->vpath_handles);
4893
4894 hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4895
4896 *vpath_handle = vp;
4897
4898 attr->fifo_attr.userdata = vpath->fifoh;
4899 attr->ring_attr.userdata = vpath->ringh;
4900
4901 return VXGE_HW_OK;
4902
4903 vpath_open_exit8:
4904 if (vpath->ringh != NULL)
4905 __vxge_hw_ring_delete(vp);
4906 vpath_open_exit7:
4907 if (vpath->fifoh != NULL)
4908 __vxge_hw_fifo_delete(vp);
4909 vpath_open_exit6:
4910 vfree(vp);
4911 vpath_open_exit2:
4912 __vxge_hw_vp_terminate(hldev, attr->vp_id);
4913 vpath_open_exit1:
4914
4915 return status;
4916 }
4917
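/*
 * Usage sketch (illustrative only; error handling trimmed): open vpath 0 of
 * an initialized device. "hldev" is assumed to come from device
 * initialization, with the fifo and ring attributes filled in by the caller
 * as vxge-main.c does.
 *
 *	struct vxge_hw_vpath_attr attr;
 *	struct __vxge_hw_vpath_handle *handle;
 *	enum vxge_hw_status status;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.vp_id = 0;
 *	status = vxge_hw_vpath_open(hldev, &attr, &handle);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */
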
4918 /**
4919 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD count to the
4920 * (vpath) rx doorbell
4921 * @vp: Handle got from previous vpath open
4922 *
4923 * This function posts the count of RxDs available in RxD memory to the
4924 * rx doorbell of the vpath opened earlier and derives the ring's refill limit.
4925 */
4926 void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4927 {
4928 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4929 struct __vxge_hw_ring *ring = vpath->ringh;
4930 struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
4931 u64 new_count, val64, val164;
4932
4933 if (vdev->titan1) {
4934 new_count = readq(&vpath->vp_reg->rxdmem_size);
4935 new_count &= 0x1fff;
4936 } else
4937 new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
4938
4939 val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
4940
4941 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4942 &vpath->vp_reg->prc_rxd_doorbell);
4943 readl(&vpath->vp_reg->prc_rxd_doorbell);
4944
4945 val164 /= 2;
4946 val64 = readq(&vpath->vp_reg->prc_cfg6);
4947 val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4948 val64 &= 0x1ff;
4949
4950 /*
4951 * Each RxD is of 4 qwords
4952 */
4953 new_count -= (val64 + 1);
4954 val64 = min(val164, new_count) / 4;
4955
4956 ring->rxds_limit = min(ring->rxds_limit, val64);
4957 if (ring->rxds_limit < 4)
4958 ring->rxds_limit = 4;
4959 }
4960
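/*
 * Worked example for the arithmetic above, assuming the field macros keep
 * the count in the low-order bits (as the later divides suggest): with
 * new_count = 4096 qwords of RxD memory and RXD_SPAT = 255, val164 becomes
 * 2048, new_count becomes 4096 - 256 = 3840, and the refill limit is
 * min(2048, 3840) / 4 = 512 RxDs, each RxD being four qwords.
 */
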
4961 /*
4962 * __vxge_hw_blockpool_block_free - Frees a block from the block pool
4963 * @devh: HAL device
4964 * @entry: Entry of block to be freed
4965 *
4966 * This function frees a block back to the block pool
4967 */
4968 static void
4969 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
4970 struct __vxge_hw_blockpool_entry *entry)
4971 {
4972 struct __vxge_hw_blockpool *blockpool;
4973
4974 blockpool = &devh->block_pool;
4975
4976 if (entry->length == blockpool->block_size) {
4977 list_add(&entry->item, &blockpool->free_block_list);
4978 blockpool->pool_size++;
4979 }
4980
4981 __vxge_hw_blockpool_blocks_remove(blockpool);
4982 }
4983
4984 /*
4985 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
4986 * This function is used to close access to a virtual path opened
4987 * earlier.
4988 */
4989 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4990 {
4991 struct __vxge_hw_virtualpath *vpath = NULL;
4992 struct __vxge_hw_device *devh = NULL;
4993 u32 vp_id = vp->vpath->vp_id;
4994 u32 is_empty = TRUE;
4995 enum vxge_hw_status status = VXGE_HW_OK;
4996
4997 vpath = vp->vpath;
4998 devh = vpath->hldev;
4999
5000 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
5001 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
5002 goto vpath_close_exit;
5003 }
5004
5005 list_del(&vp->item);
5006
5007 if (!list_empty(&vpath->vpath_handles)) {
5008 list_add(&vp->item, &vpath->vpath_handles);
5009 is_empty = FALSE;
5010 }
5011
5012 if (!is_empty) {
5013 status = VXGE_HW_FAIL;
5014 goto vpath_close_exit;
5015 }
5016
5017 devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
5018
5019 if (vpath->ringh != NULL)
5020 __vxge_hw_ring_delete(vp);
5021
5022 if (vpath->fifoh != NULL)
5023 __vxge_hw_fifo_delete(vp);
5024
5025 if (vpath->stats_block != NULL)
5026 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
5027
5028 vfree(vp);
5029
5030 __vxge_hw_vp_terminate(devh, vp_id);
5031
5032 spin_lock(&vpath->lock);
5033 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
5034 spin_unlock(&vpath->lock);
5035
5036 vpath_close_exit:
5037 return status;
5038 }
5039
5040 /*
5041 * vxge_hw_vpath_reset - Resets vpath
5042 * This function is used to request a reset of the vpath
5043 */
5044 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
5045 {
5046 enum vxge_hw_status status;
5047 u32 vp_id;
5048 struct __vxge_hw_virtualpath *vpath = vp->vpath;
5049
5050 vp_id = vpath->vp_id;
5051
5052 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
5053 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
5054 goto exit;
5055 }
5056
5057 status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
5058 if (status == VXGE_HW_OK)
5059 vpath->sw_stats->soft_reset_cnt++;
5060 exit:
5061 return status;
5062 }
5063
5064 /*
5065 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
5066 * This function polls for the vpath reset completion and re-initializes
5067 * the vpath.
5068 */
5069 enum vxge_hw_status
5070 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
5071 {
5072 struct __vxge_hw_virtualpath *vpath = NULL;
5073 enum vxge_hw_status status;
5074 struct __vxge_hw_device *hldev;
5075 u32 vp_id;
5076
5077 vp_id = vp->vpath->vp_id;
5078 vpath = vp->vpath;
5079 hldev = vpath->hldev;
5080
5081 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
5082 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
5083 goto exit;
5084 }
5085
5086 status = __vxge_hw_vpath_reset_check(vpath);
5087 if (status != VXGE_HW_OK)
5088 goto exit;
5089
5090 status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
5091 if (status != VXGE_HW_OK)
5092 goto exit;
5093
5094 status = __vxge_hw_vpath_initialize(hldev, vp_id);
5095 if (status != VXGE_HW_OK)
5096 goto exit;
5097
5098 if (vpath->ringh != NULL)
5099 __vxge_hw_vpath_prc_configure(hldev, vp_id);
5100
5101 memset(vpath->hw_stats, 0,
5102 sizeof(struct vxge_hw_vpath_stats_hw_info));
5103
5104 memset(vpath->hw_stats_sav, 0,
5105 sizeof(struct vxge_hw_vpath_stats_hw_info));
5106
5107 writeq(vpath->stats_block->dma_addr,
5108 &vpath->vp_reg->stats_cfg);
5109
5110 status = vxge_hw_vpath_stats_enable(vp);
5111
5112 exit:
5113 return status;
5114 }
5115
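/*
 * Typical recovery sequence (sketch; "handle" is assumed to be an open
 * vpath handle and the caller able to tolerate the polling):
 *
 *	status = vxge_hw_vpath_reset(handle);
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_vpath_recover_from_reset(handle);
 *	if (status == VXGE_HW_OK)
 *		vxge_hw_vpath_enable(handle);
 */
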
5116 /*
5117 * vxge_hw_vpath_enable - Enable vpath.
5118 * This routine clears the vpath reset, thereby enabling the vpath
5119 * to start forwarding frames and generating interrupts.
5120 */
5121 void
5122 vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
5123 {
5124 struct __vxge_hw_device *hldev;
5125 u64 val64;
5126
5127 hldev = vp->vpath->hldev;
5128
5129 val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
5130 1 << (16 - vp->vpath->vp_id));
5131
5132 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
5133 &hldev->common_reg->cmn_rsthdlr_cfg1);
5134 }