vxge: Update copyright information
drivers/net/vxge/vxge-traffic.c
/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/etherdevice.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"

/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed last in the
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;

	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_status);

	val64 = readq(&vp_reg->vpath_general_int_status);
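	/* the read above presumably just flushes the preceding PIO writes;
	 * its value is intentionally discarded */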

	/* Mask unwanted interrupts */

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	/* Unmask the individual interrupts */

	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
			&vp_reg->asic_ntwk_vp_err_mask);

	__vxge_hw_pio_mem_write32_upper(0,
			&vp_reg->vpath_general_int_mask);
exit:
	return status;
}

/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed when a vpath
 * is being torn down.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;

	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	__vxge_hw_pio_mem_write32_upper(
		(u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_general_int_mask);

	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);

exit:
	return status;
}

/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
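	/*
	 * Each set_msix_mask_vect[] register serves four vectors: pick the
	 * register with msix_id % 4 and the vector's bit with msix_id >> 2.
	 */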
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *		with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	    (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	    (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	    (intr_mode != VXGE_HW_INTR_MODE_DEF))
		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

	hldev->config.intr_mode = intr_mode;
	return intr_mode;
}

/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed last in the
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			writeq(val64, &hldev->common_reg->tim_int_status0);

			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
					&hldev->common_reg->tim_int_status1);

			__vxge_hw_pio_mem_write32_upper(~val32,
					&hldev->common_reg->tim_int_mask1);
		}
	}

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
	u64 val64 = 0;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
		val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
			&hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 *
 * Returns: void
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
	u32 val32;

	val32 = readl(&hldev->common_reg->titan_general_int_status);
}

/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *	general_int_status register.
 *
 * The function performs two actions: it first checks whether (shared IRQ) the
 * interrupt was raised by the device, then it masks the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note that
 * in this case the device remains enabled).
 * Otherwise returns the status of alarm processing, with the 64bit general
 * adapter status handed back through @reason.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* not Titan interrupt */
		*reason = 0;
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

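	/* a readback of all f's suggests the PCI function has stopped
	 * responding; confirm against adapter_status before reporting
	 * a slot freeze */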
	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {

			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			*reason = 0;
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

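	/* vpaths_deployed uses vxge_mBIT() (bit 0 = MSB); shift the bits
	 * down so they line up with the per-vpath traffic interrupt field */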
	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

		return VXGE_HW_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}

/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for a programmable amount of time.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the link is already marked up, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_UP)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_UP;

	/* notify driver */
	if (hldev->uld_callbacks.link_up)
		hldev->uld_callbacks.link_up(hldev);
exit:
	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the link is already marked down, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_DOWN)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_DOWN;

	/* notify driver */
	if (hldev->uld_callbacks.link_down)
		hldev->uld_callbacks.link_down(hldev);
exit:
	return VXGE_HW_OK;
}

/**
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
enum vxge_hw_status
__vxge_hw_device_handle_error(
		struct __vxge_hw_device *hldev,
		u32 vp_id,
		enum vxge_hw_event type)
{
	switch (type) {
	case VXGE_HW_EVENT_UNKNOWN:
		break;
	case VXGE_HW_EVENT_RESET_START:
	case VXGE_HW_EVENT_RESET_COMPLETE:
	case VXGE_HW_EVENT_LINK_DOWN:
	case VXGE_HW_EVENT_LINK_UP:
		goto out;
	case VXGE_HW_EVENT_ALARM_CLEARED:
		goto out;
	case VXGE_HW_EVENT_ECCERR:
	case VXGE_HW_EVENT_MRPCIM_ECCERR:
		goto out;
	case VXGE_HW_EVENT_FIFO_ERR:
	case VXGE_HW_EVENT_VPATH_ERR:
	case VXGE_HW_EVENT_CRITICAL_ERR:
	case VXGE_HW_EVENT_SERR:
		break;
	case VXGE_HW_EVENT_SRPCIM_SERR:
	case VXGE_HW_EVENT_MRPCIM_SERR:
		goto out;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		break;
	default:
		vxge_assert(0);
		goto out;
	}

	/* notify driver */
	if (hldev->uld_callbacks.crit_err)
		hldev->uld_callbacks.crit_err(
			(struct __vxge_hw_device *)hldev,
			type, vp_id);
out:
	return VXGE_HW_OK;
}

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status0);
	}

	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status1);
	}
}

/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 *
 */
enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
	void **tmp_arr;

	if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
		*dtrh = channel->reserve_arr[--channel->reserve_ptr];

		return VXGE_HW_OK;
	}

	/* switch between empty and full arrays */

	/* the idea behind such a design is that by keeping the free and
	 * reserve arrays separated we basically separate the irq and
	 * non-irq parts, i.e. no additional locking is needed when a
	 * resource is freed */

	if (channel->length - channel->free_ptr > 0) {

		tmp_arr = channel->reserve_arr;
		channel->reserve_arr = channel->free_arr;
		channel->free_arr = tmp_arr;
		channel->reserve_ptr = channel->length;
		channel->reserve_top = channel->free_ptr;
		channel->free_ptr = channel->length;

		channel->stats->reserve_free_swaps_cnt++;

		goto _alloc_after_swap;
	}

	channel->stats->full_cnt++;

	*dtrh = NULL;
	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}

/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channelh: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to work array.
 *
 */
void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
	vxge_assert(channel->work_arr[channel->post_index] == NULL);

	channel->work_arr[channel->post_index++] = dtrh;

	/* wrap-around */
	if (channel->post_index == channel->length)
		channel->post_index = 0;
}

/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtrh: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from the work array
 *
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
	vxge_assert(channel->compl_index < channel->length);

	*dtrh = channel->work_arr[channel->compl_index];
	prefetch(*dtrh);
}

/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from the work array
 *
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
	channel->work_arr[channel->compl_index] = NULL;

	/* wrap-around */
	if (++channel->compl_index == channel->length)
		channel->compl_index = 0;

	channel->stats->total_compl_cnt++;
}

/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtr: DTR pointer
 *
 * Returns the dtr to the free array
 *
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
	channel->free_arr[--channel->free_ptr] = dtrh;
}

/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve the number of DTRs available. This function cannot be called
 * from the data path. ring_initial_replenish() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
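	/* available DTRs = what is left in the reserve array plus what has
	 * been freed but not yet swapped back from the free array */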
	return (channel->reserve_ptr - channel->reserve_top) +
		(channel->length - channel->free_ptr);
}

/**
 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 *	with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
					     void **rxdh)
{
	enum vxge_hw_status status;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	status = vxge_hw_channel_dtr_alloc(channel, rxdh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_ring_rxd_1 *rxdp =
			(struct vxge_hw_ring_rxd_1 *)*rxdh;

		rxdp->control_0 = rxdp->control_1 = 0;
	}

	return status;
}

/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_free(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares an rxd and posts it
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_post(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

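	/* the barrier below makes the descriptor writes visible before
	 * ownership is handed to the adapter */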
	wmb();
	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(channel, rxdh);

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	wmb();
	vxge_hw_ring_rxd_post_post(ring, rxdh);
}

/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *	Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_ring_rxd_1 *rxdp;
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 control_0, own;

	channel = &ring->channel;

	vxge_hw_channel_dtr_try_complete(channel, rxdh);

	rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
	if (rxdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	control_0 = rxdp->control_0;
	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

	/* a descriptor is complete when the adapter has given up ownership,
	 * or when it reports a dropped frame (FRM_DROP completions still
	 * carry the ownership bit) */
	if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {

		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
			0);

		++ring->cmpl_cnt;
		vxge_hw_channel_dtr_complete(channel);

		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

		ring->stats->common_stats.usage_cnt++;
		if (ring->stats->common_stats.usage_max <
				ring->stats->common_stats.usage_cnt)
			ring->stats->common_stats.usage_max =
				ring->stats->common_stats.usage_cnt;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* reset it; we do not want to return garbage to the driver */
	*rxdh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}

/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *	"transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when it encounters a critical error.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	/* If the t_code is not supported and it is not 0x5 (an unparseable
	 * packet, such as one with an unknown IPv6 header), drop it !!!
	 */

	if (t_code == VXGE_HW_RING_T_CODE_OK ||
	    t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
		status = VXGE_HW_OK;
		goto exit;
	}

	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
	return status;
}

/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifohandle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to doorbell FIFO
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
					  u64 txdl_ptr, u32 num_txds,
					  u32 no_snoop)
{
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
		&fifo->nofl_db->control_0);

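	/* mmiowb() is presumably here to keep the control word ordered
	 * ahead of the TxDL pointer write that follows */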
	mmiowb();

	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

	mmiowb();
}

/**
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
	return vxge_hw_channel_dtr_count(&fifoh->channel);
}

/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifoh: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 *	with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of driver to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
	struct __vxge_hw_fifo *fifo,
	void **txdlh, void **txdl_priv)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status;
	int i;

	channel = &fifo->channel;

	status = vxge_hw_channel_dtr_alloc(channel, txdlh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_fifo_txd *txdp =
			(struct vxge_hw_fifo_txd *)*txdlh;
		struct __vxge_hw_fifo_txdl_priv *priv;

		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

		/* reset the TxDL's private */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->next_txdl_priv = NULL;

		*txdl_priv = (void *)(size_t)txdp->host_control;

		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}
	}

	return status;
}

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *	(of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
				  void *txdlh, u32 frag_idx,
				  dma_addr_t dma_pointer, u32 size)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp, *txdp_last;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

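	/* only the first TxD of a TxDL carries the gather code and the
	 * interrupt settings; continuation TxDs start out cleared */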
	if (frag_idx != 0)
		txdp->control_0 = txdp->control_1 = 0;
	else {
		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
		txdp->control_1 |= fifo->interrupt_type;
		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
			fifo->tx_intr_num);
		if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
				(txdl_priv->frags - 1);
			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
		}
	}

	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
	fifo->stats->total_buffers++;
	txdl_priv->frags++;
}

/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = (struct vxge_hw_fifo_txd *)txdlh;

	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
		VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

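	/* the doorbell takes the highest TxD number, hence frags - 1 */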
	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
		fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}

/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *	Transmit Descriptor Format.
 *	Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = (struct vxge_hw_fifo_txd *)*txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether host owns it */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* no more completions */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}

/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *	"transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when it encounters a critical error.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
					      void *txdlh,
					      enum vxge_hw_fifo_tcode t_code)
{
	struct __vxge_hw_channel *channel;

	enum vxge_hw_status status = VXGE_HW_OK;
	channel = &fifo->channel;

	/* valid transfer codes are 0x0 through 0x4; the masked value can
	 * never be negative, so only the upper bound needs checking */
	if ((t_code & 0x7) > 0x4) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
	return status;
}

/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	u32 max_frags;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
			(struct vxge_hw_fifo_txd *)txdlh);

	max_frags = fifo->config->max_frags;

	vxge_hw_channel_dtr_free(channel, txdlh);
}

/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *		to the MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *		enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN],
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

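	/* pack the six MAC (and mask) bytes MSB-first into the 64-bit
	 * steering data words */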
	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	switch (duplicate_mode) {
	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
		i = 0;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
		i = 1;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
		i = 2;
		break;
	default:
		i = 0;
		break;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
	return status;
}

/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *		from the MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *		vpath from the MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}

exit:
	return status;
}

/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *		from the MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Deletes the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *		to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
 *		from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the first vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
 *		from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the next vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *		from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}

/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 *
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

1893 /*
1894 * __vxge_hw_vpath_alarm_process - Process Alarms.
1895 * @vpath: Virtual Path.
1896 * @skip_alarms: Do not clear the alarms
1897 *
1898 * Process vpath alarms.
1899 *
1900 */
1901 enum vxge_hw_status __vxge_hw_vpath_alarm_process(
1902 struct __vxge_hw_virtualpath *vpath,
1903 u32 skip_alarms)
1904 {
1905 u64 val64;
1906 u64 alarm_status;
1907 u64 pic_status;
1908 struct __vxge_hw_device *hldev = NULL;
1909 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
1910 u64 mask64;
1911 struct vxge_hw_vpath_stats_sw_info *sw_stats;
1912 struct vxge_hw_vpath_reg __iomem *vp_reg;
1913
1914 if (vpath == NULL) {
1915 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1916 alarm_event);
1917 goto out2;
1918 }
1919
1920 hldev = vpath->hldev;
1921 vp_reg = vpath->vp_reg;
1922 alarm_status = readq(&vp_reg->vpath_general_int_status);
1923
1924 if (alarm_status == VXGE_HW_ALL_FOXES) {
1925 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
1926 alarm_event);
1927 goto out;
1928 }
1929
1930 sw_stats = vpath->sw_stats;
1931
1932 if (alarm_status & ~(
1933 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
1934 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
1935 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
1936 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
1937 sw_stats->error_stats.unknown_alarms++;
1938
1939 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1940 alarm_event);
1941 goto out;
1942 }

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			     ))) {
				sw_stats->error_stats.network_sustained_ok++;

				writeq(
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
			    ~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
			    ~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
			    ~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
			    ~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
			     VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
			    ~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
			    ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
			    ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			     VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
			    ~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
			    ~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
			    ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) &
			    ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) &
			    ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}

/**
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Virtual Path handle.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}
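
/*
 * Illustrative sketch (hypothetical ISR fragment): how the alarm-processing
 * result is typically consumed. A critical status (VXGE_HW_ERR_CRITICAL,
 * i.e. a serious-error event) would normally kick a reset path;
 * "reset_work" is an assumed, caller-defined work item.
 *
 *	status = vxge_hw_vpath_alarm_process(vp, 0);
 *	if (status == VXGE_HW_ERR_CRITICAL)
 *		schedule_work(&reset_work);
 */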

/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 * alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *             interrupts (can be repeated). If the fifo or ring is not
 *             enabled, the MSIX vector for it should be set to 0.
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API associates the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
		  (vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
		  (vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
		(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
		&vp_reg->interrupt_cfg2);

	/* The two identical intr_mode checks of the original are merged;
	 * the one-shot enables are only meaningful in one-shot mode.
	 */
	if (vpath->hldev->config.intr_mode ==
	    VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);

		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);

		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
				0, 32), &vp_reg->one_shot_vect3_en);
	}
}
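
/*
 * Illustrative sketch (assumed vector layout, values are examples only):
 * one MSIX vector for the Tx TIM interrupt, one for Rx, alarm on a third.
 * The real assignment depends on how the vectors were enabled by the
 * caller.
 *
 *	int tim_msix_id[4] = {0, 1, 0, 0};
 *
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, 2);
 */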

/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the MSIX interrupt for the given msix_id.
 *
 * See also: vxge_hw_vpath_msix_unmask()
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the MSIX interrupt for the given msix_id.
 *
 * See also: vxge_hw_vpath_msix_mask(), vxge_hw_vpath_msix_unmask()
 */
void
vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
	} else {
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
	}
}

/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the MSIX interrupt for the given msix_id.
 *
 * See also: vxge_hw_vpath_msix_mask()
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
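
/*
 * Illustrative sketch (hypothetical handler): a common pattern is to mask
 * the vector on entry, process completions, then clear and unmask it.
 * Note that in one-shot mode vxge_hw_vpath_msix_clear() writes
 * clr_msix_one_shot_vec instead of the plain mask-clear register.
 *
 *	vxge_hw_vpath_msix_mask(vp, msix_id);
 *	... process fifo/ring completions ...
 *	vxge_hw_vpath_msix_clear(vp, msix_id);
 *	vxge_hw_vpath_msix_unmask(vp, msix_id);
 */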

/**
 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
 * @vp: Virtual Path handle.
 *
 * The function masks all MSIX interrupts for the given vpath.
 */
void
vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
		&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
}

/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ... 3] = 0};
	u32 tim_int_mask1[4] = {[0 ... 3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}

/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ... 3] = 0};
	u32 tim_int_mask1[4] = {[0 ... 3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	/* Re-read the current tim_int_mask1 state so the unmask below
	 * operates on it rather than on the stale tim_int_mask0 value,
	 * mirroring vxge_hw_vpath_inta_mask_tx_rx().
	 */
	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			   tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}
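
/*
 * Illustrative sketch (hypothetical INTA flow): the mask/unmask pair
 * brackets the polling of the vpath so the Tx/Rx TIM interrupts stay
 * quiet while completions are drained. "ring", "fifo" and the Tx
 * arguments are assumed to come from the caller's context.
 *
 *	vxge_hw_vpath_inta_mask_tx_rx(vp);
 *	vxge_hw_vpath_poll_rx(ring);
 *	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, nr_skb, &more);
 *	vxge_hw_vpath_inta_unmask_tx_rx(vp);
 */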

/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	u64 val64 = 0;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			/* Read back the interrupt status to flush the
			 * posted doorbell write; the value is not used.
			 */
			val64 =
			  readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
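
/*
 * Illustrative sketch (hypothetical NAPI poll fragment): a single call
 * hands the completed RxDs to ring->callback; per-poll budget accounting
 * is assumed to live inside that callback.
 *
 *	status = vxge_hw_vpath_poll_rx(ring);
 *	if (status != VXGE_HW_OK &&
 *	    status != VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS)
 *		... treat as an error ...
 */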

/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: Cursor into the caller's array of completed sk_buff pointers;
 *           advanced by the completion callback.
 * @nr_skb: Capacity of the caller's sk_buff array.
 * @more: Set by the callback when further completions remain.
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					struct sk_buff ***skb_ptr, int nr_skb,
					int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}
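
/*
 * Illustrative sketch (hypothetical Tx sweep): completed skbs are written
 * into "completed" by the fifo callback through skb_ptr and freed once
 * polling returns. "NR_TX_SKBS" is an assumed array size.
 *
 *	struct sk_buff *completed[NR_TX_SKBS];
 *	struct sk_buff **skb_ptr = completed;
 *	struct sk_buff **temp;
 *	int more = 0;
 *
 *	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, NR_TX_SKBS, &more);
 *	for (temp = completed; temp != skb_ptr; temp++)
 *		dev_kfree_skb_irq(*temp);
 */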