/* bnx2x_init.h: Broadcom Everest network driver.
 * Structures and macros needed during the initialization.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
 */

#ifndef BNX2X_INIT_H
#define BNX2X_INIT_H

/* Init operation types and structures */
enum {
        OP_RD = 0x1,    /* read a single register */
        OP_WR,          /* write a single register */
        OP_SW,          /* copy a string to the device */
        OP_ZR,          /* clear memory */
        OP_ZP,          /* unzip then copy with DMAE */
        OP_WR_64,       /* write 64 bit pattern */
        OP_WB,          /* copy a string using DMAE */
        OP_WB_ZR,       /* clear a string using DMAE or indirect-wr */
        /* Skip the following ops if all of the init modes don't match */
        OP_IF_MODE_OR,
        /* Skip the following ops if any of the init modes don't match */
        OP_IF_MODE_AND,
        OP_MAX
};

enum {
        STAGE_START,
        STAGE_END,
};

/* Returns the index of start or end of a specific block stage in ops array */
#define BLOCK_OPS_IDX(block, stage, end) \
        (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
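
/* Illustrative example (not from the source): for any block/stage pair the
 * two bounding entries are adjacent, e.g.
 *      BLOCK_OPS_IDX(BLOCK_DMAE, PHASE_COMMON, STAGE_START)
 *      BLOCK_OPS_IDX(BLOCK_DMAE, PHASE_COMMON, STAGE_END)
 * evaluate to consecutive indices, since STAGE_END is STAGE_START + 1.
 */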


/* structs for the various opcodes */
struct raw_op {
        u32 op:8;
        u32 offset:24;
        u32 raw_data;
};

struct op_read {
        u32 op:8;
        u32 offset:24;
        u32 val;
};

struct op_write {
        u32 op:8;
        u32 offset:24;
        u32 val;
};

struct op_arr_write {
        u32 op:8;
        u32 offset:24;
#ifdef __BIG_ENDIAN
        u16 data_len;
        u16 data_off;
#else /* __LITTLE_ENDIAN */
        u16 data_off;
        u16 data_len;
#endif
};

struct op_zero {
        u32 op:8;
        u32 offset:24;
        u32 len;
};

struct op_if_mode {
        u32 op:8;
        u32 cmd_offset:24;
        u32 mode_bit_map;
};


union init_op {
        struct op_read read;
        struct op_write write;
        struct op_arr_write arr_wr;
        struct op_zero zero;
        struct raw_op raw;
        struct op_if_mode if_mode;
};


/* Init Phases */
enum {
        PHASE_COMMON,
        PHASE_PORT0,
        PHASE_PORT1,
        PHASE_PF0,
        PHASE_PF1,
        PHASE_PF2,
        PHASE_PF3,
        PHASE_PF4,
        PHASE_PF5,
        PHASE_PF6,
        PHASE_PF7,
        NUM_OF_INIT_PHASES
};

/* Init Modes */
enum {
        MODE_ASIC               = 0x00000001,
        MODE_FPGA               = 0x00000002,
        MODE_EMUL               = 0x00000004,
        MODE_E2                 = 0x00000008,
        MODE_E3                 = 0x00000010,
        MODE_PORT2              = 0x00000020,
        MODE_PORT4              = 0x00000040,
        MODE_SF                 = 0x00000080,
        MODE_MF                 = 0x00000100,
        MODE_MF_SD              = 0x00000200,
        MODE_MF_SI              = 0x00000400,
        MODE_MF_AFEX            = 0x00000800,
        MODE_E3_A0              = 0x00001000,
        MODE_E3_B0              = 0x00002000,
        MODE_COS3               = 0x00004000,
        MODE_COS6               = 0x00008000,
        MODE_LITTLE_ENDIAN      = 0x00010000,
        MODE_BIG_ENDIAN         = 0x00020000,
};

/* Init Blocks */
enum {
        BLOCK_ATC,
        BLOCK_BRB1,
        BLOCK_CCM,
        BLOCK_CDU,
        BLOCK_CFC,
        BLOCK_CSDM,
        BLOCK_CSEM,
        BLOCK_DBG,
        BLOCK_DMAE,
        BLOCK_DORQ,
        BLOCK_HC,
        BLOCK_IGU,
        BLOCK_MISC,
        BLOCK_NIG,
        BLOCK_PBF,
        BLOCK_PGLUE_B,
        BLOCK_PRS,
        BLOCK_PXP2,
        BLOCK_PXP,
        BLOCK_QM,
        BLOCK_SRC,
        BLOCK_TCM,
        BLOCK_TM,
        BLOCK_TSDM,
        BLOCK_TSEM,
        BLOCK_UCM,
        BLOCK_UPB,
        BLOCK_USDM,
        BLOCK_USEM,
        BLOCK_XCM,
        BLOCK_XPB,
        BLOCK_XSDM,
        BLOCK_XSEM,
        BLOCK_MISC_AEU,
        NUM_OF_INIT_BLOCKS
};

/* QM queue numbers */
#define BNX2X_ETH_Q             0
#define BNX2X_TOE_Q             3
#define BNX2X_TOE_ACK_Q         6
#define BNX2X_ISCSI_Q           9
#define BNX2X_ISCSI_ACK_Q       11
#define BNX2X_FCOE_Q            10

/* Vnics per mode */
#define BNX2X_PORT2_MODE_NUM_VNICS      4
#define BNX2X_PORT4_MODE_NUM_VNICS      2

/* COS offset for port1 in E3 B0 4port mode */
#define BNX2X_E3B0_PORT1_COS_OFFSET     3

/* QM Register addresses */
#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
        (QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
        (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
        (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
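/* Reading of BNX2X_VOQ_Q_REG_ADDR (an interpretation, not from the source):
 * each COS appears to own a pair of 32-bit queue-mask registers starting at
 * QM_REG_VOQQMASK_0_LSB; (pf_q_num) >> 5 selects the register within the
 * pair and the low 5 bits select the queue bit, as used by bnx2x_map_q_cos()
 * below.
 */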

/* extracts the QM queue number for the specified port and vnic */
#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
        ((((port) << 1) | (vnic)) * 16 + (q_num))
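/* For instance (illustrative arithmetic only): port 0, vnic 1 and
 * q_num BNX2X_ETH_Q (0) give ((0 << 1) | 1) * 16 + 0 = 16.
 */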


/* Maps the specified queue to the specified COS */
static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
{
        /* find current COS mapping */
        u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);

        /* check if queue->COS mapping has changed */
        if (curr_cos != new_cos) {
                u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
                u32 reg_addr, reg_bit_map, vnic;

                /* update parameters for 4port mode */
                if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
                        num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
                        if (BP_PORT(bp)) {
                                curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
                                new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
                        }
                }

                /* change queue mapping for each VNIC */
                for (vnic = 0; vnic < num_vnics; vnic++) {
                        u32 pf_q_num =
                                BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
                        u32 q_bit_map = 1 << (pf_q_num & 0x1f);

                        /* overwrite queue->VOQ mapping */
                        REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);

                        /* clear queue bit from current COS bit map */
                        reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
                        reg_bit_map = REG_RD(bp, reg_addr);
                        REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));

                        /* set queue bit in new COS bit map */
                        reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
                        reg_bit_map = REG_RD(bp, reg_addr);
                        REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);

                        /* set/clear queue bit in command-queue bit map
                         * (E2/E3A0 only, valid COS values are 0/1)
                         */
                        if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
                                reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
                                reg_bit_map = REG_RD(bp, reg_addr);
                                q_bit_map = 1 << (2 * (pf_q_num & 0xf));
                                reg_bit_map = new_cos ?
                                        (reg_bit_map | q_bit_map) :
                                        (reg_bit_map & (~q_bit_map));
                                REG_WR(bp, reg_addr, reg_bit_map);
                        }
                }
        }
}

/* Configures the QM according to the specified per-traffic-type COSes */
static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
                                       struct priority_cos *traffic_cos)
{
        bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
                        traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
        bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
                        traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
        bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
                        traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
        if (mode != STATIC_COS) {
                /* required only in backward compatible COS mode */
                bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
                                traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
                bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
                                traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
                bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
                                traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
        }
}


/* congestion management port init api description
 * the api works as follows:
 * the driver should pass the cmng_init_input struct, the port_init function
 * will prepare the required internal ram structure which will be passed back
 * to the driver (cmng_init) that will write it into the internal ram.
 *
 * IMPORTANT REMARKS:
 * 1. the cmng_init struct does not represent the contiguous internal ram
 *    structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
 *    offset in order to write the port sub struct and the
 *    PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
 *    words - don't use memcpy!).
 * 2. although the cmng_init struct is filled for the maximal vnic number
 *    possible, the driver should only write the valid vnics into the internal
 *    ram according to the appropriate port mode.
 */
#define BITS_TO_BYTES(x) ((x)/8)

/* CMNG constants, as derived from system spec calculations */

/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
#define DEF_MIN_RATE                            100

/* resolution of the rate shaping timer - 400 usec */
#define RS_PERIODIC_TIMEOUT_USEC                400

/* number of bytes in single QM arbitration cycle -
 * coefficient for calculating the fairness timer
 */
#define QM_ARB_BYTES                            160000

/* resolution of Min algorithm 1:100 */
#define MIN_RES                                 100

/* how many bytes above threshold for
 * the minimal credit of Min algorithm
 */
#define MIN_ABOVE_THRESH                        32768

/* Fairness algorithm integration time coefficient -
 * for calculating the actual Tfair
 */
#define T_FAIR_COEF     ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
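/* With the constants above this evaluates to
 * (32768 + 160000) * 8 * 100 = 154214400 (illustrative arithmetic only).
 */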

/* Memory of fairness algorithm - 2 cycles */
#define FAIR_MEM                                2
#define SAFC_TIMEOUT_USEC                       52

#define SDM_TICKS                               4


static inline void bnx2x_init_max(const struct cmng_init_input *input_data,
                                  u32 r_param, struct cmng_init *ram_data)
{
        u32 vnic;
        struct cmng_vnic *vdata = &ram_data->vnic;
        struct cmng_struct_per_port *pdata = &ram_data->port;
        /* rate shaping per-port variables
         * 100 microseconds in SDM ticks = 25,
         * since each tick is 4 microseconds
         */

        pdata->rs_vars.rs_periodic_timeout =
                RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;

        /* this is the threshold below which no timer arming will occur.
         * the 1.25 coefficient makes the threshold a little bigger
         * than the real time to compensate for timer inaccuracy
         */
        pdata->rs_vars.rs_threshold =
                (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;

        /* rate shaping per-vnic variables */
        for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
                /* global vnic counter */
                vdata->vnic_max_rate[vnic].vn_counter.rate =
                        input_data->vnic_max_rate[vnic];
                /* maximal Mbps for this vnic
                 * the quota in each timer period - number of bytes
                 * transmitted in this period
                 */
                vdata->vnic_max_rate[vnic].vn_counter.quota =
                        RS_PERIODIC_TIMEOUT_USEC *
                        (u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
        }

}

static inline void bnx2x_init_min(const struct cmng_init_input *input_data,
                                  u32 r_param, struct cmng_init *ram_data)
{
        u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
        struct cmng_vnic *vdata = &ram_data->vnic;
        struct cmng_struct_per_port *pdata = &ram_data->port;

        /* this is the resolution of the fairness timer */
        fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;

        /* fairness per-port variables
         * for 10G it is 1000usec. for 1G it is 10000usec.
         */
        tFair = T_FAIR_COEF / input_data->port_rate;

        /* this is the threshold below which we won't arm the timer anymore */
        pdata->fair_vars.fair_threshold = QM_ARB_BYTES;

        /* we multiply by 1e3/8 to get bytes/msec. We don't want the credits
         * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
         */
        pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;

        /* since each tick is 4 microseconds */
        pdata->fair_vars.fairness_timeout =
                fair_periodic_timeout_usec / SDM_TICKS;

        /* calculate sum of weights */
        vnicWeightSum = 0;

        for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++)
                vnicWeightSum += input_data->vnic_min_rate[vnic];

        /* global vnic counter */
        if (vnicWeightSum > 0) {
                /* fairness per-vnic variables */
                for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
                        /* this is the credit for each period of the fairness
                         * algorithm - number of bytes in T_FAIR (this vnic
                         * share of the port rate)
                         */
                        vdata->vnic_min_rate[vnic].vn_credit_delta =
                                (u32)input_data->vnic_min_rate[vnic] * 100 *
                                (T_FAIR_COEF / (8 * 100 * vnicWeightSum));
                        if (vdata->vnic_min_rate[vnic].vn_credit_delta <
                            pdata->fair_vars.fair_threshold +
                            MIN_ABOVE_THRESH) {
                                vdata->vnic_min_rate[vnic].vn_credit_delta =
                                        pdata->fair_vars.fair_threshold +
                                        MIN_ABOVE_THRESH;
                        }
                }
        }
}

static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
                                     u32 r_param, struct cmng_init *ram_data)
{
        u32 vnic, cos;
        u32 cosWeightSum = 0;
        struct cmng_vnic *vdata = &ram_data->vnic;
        struct cmng_struct_per_port *pdata = &ram_data->port;

        for (cos = 0; cos < MAX_COS_NUMBER; cos++)
                cosWeightSum += input_data->cos_min_rate[cos];

        if (cosWeightSum > 0) {

                for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
                        /* Since cos and vnic shouldn't work together, the
                         * rate to divide between the coses is the port rate.
                         */
                        u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
                        for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
                                /* this is the credit for each period of
                                 * the fairness algorithm - number of bytes
                                 * in T_FAIR (this cos share of the vnic rate)
                                 */
                                ccd[cos] =
                                    (u32)input_data->cos_min_rate[cos] * 100 *
                                    (T_FAIR_COEF / (8 * 100 * cosWeightSum));
                                if (ccd[cos] < pdata->fair_vars.fair_threshold
                                                + MIN_ABOVE_THRESH) {
                                        ccd[cos] =
                                            pdata->fair_vars.fair_threshold +
                                            MIN_ABOVE_THRESH;
                                }
                        }
                }
        }
}

static inline void bnx2x_init_safc(const struct cmng_init_input *input_data,
                                   struct cmng_init *ram_data)
{
        /* in microseconds */
        ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
}

/* Congestion management port init */
static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data,
                                   struct cmng_init *ram_data)
{
        u32 r_param;
        memset(ram_data, 0, sizeof(struct cmng_init));

        ram_data->port.flags = input_data->flags;

        /* number of bytes transmitted in a rate of 10Gbps
         * in one usec = 1.25KB.
         */
        r_param = BITS_TO_BYTES(input_data->port_rate);
        bnx2x_init_max(input_data, r_param, ram_data);
        bnx2x_init_min(input_data, r_param, ram_data);
        bnx2x_init_fw_wrr(input_data, r_param, ram_data);
        bnx2x_init_safc(input_data, ram_data);
}
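
/* A minimal usage sketch (illustrative only, not part of the driver): a
 * caller would fill a cmng_init_input - the field names below are the ones
 * used in this file, the values are made up - let bnx2x_init_cmng() build
 * the RAM image, and then copy the resulting sub-structs into internal RAM
 * as described in the remarks above:
 *
 *      struct cmng_init_input input = { 0 };
 *      struct cmng_init ram;
 *
 *      input.port_rate = 10000;        (assumed: line rate in Mbps, 10G)
 *      input.vnic_max_rate[0] = 2500;
 *      input.vnic_min_rate[0] = 100;
 *      bnx2x_init_cmng(&input, &ram);
 *
 * ram.port and ram.vnic are then written to internal RAM at the proper
 * offsets - not with a single memcpy, see remark 1 above.
 */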



/* Returns the index of start or end of a specific block stage in ops array */
#define BLOCK_OPS_IDX(block, stage, end) \
        (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))


#define INITOP_SET              0       /* set the HW directly */
#define INITOP_CLEAR            1       /* clear the HW directly */
#define INITOP_INIT             2       /* set the init-value array */

/****************************************************************************
 * ILT management
 ****************************************************************************/
struct ilt_line {
        dma_addr_t page_mapping;
        void *page;
        u32 size;
};

struct ilt_client_info {
        u32 page_size;
        u16 start;
        u16 end;
        u16 client_num;
        u16 flags;
#define ILT_CLIENT_SKIP_INIT    0x1
#define ILT_CLIENT_SKIP_MEM     0x2
};

struct bnx2x_ilt {
        u32 start_line;
        struct ilt_line *lines;
        struct ilt_client_info clients[4];
#define ILT_CLIENT_CDU  0
#define ILT_CLIENT_QM   1
#define ILT_CLIENT_SRC  2
#define ILT_CLIENT_TM   3
};

/****************************************************************************
 * SRC configuration
 ****************************************************************************/
struct src_ent {
        u8 opaque[56];
        u64 next;
};

/****************************************************************************
 * Parity configuration
 ****************************************************************************/
#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
{ \
        block##_REG_##block##_PRTY_MASK, \
        block##_REG_##block##_PRTY_STS_CLR, \
        en_mask, {m1, m1h, m2, m3}, #block \
}

#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
{ \
        block##_REG_##block##_PRTY_MASK_0, \
        block##_REG_##block##_PRTY_STS_CLR_0, \
        en_mask, {m1, m1h, m2, m3}, #block"_0" \
}

#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
{ \
        block##_REG_##block##_PRTY_MASK_1, \
        block##_REG_##block##_PRTY_STS_CLR_1, \
        en_mask, {m1, m1h, m2, m3}, #block"_1" \
}
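
/* For reference, the HC entry in the table below, i.e.
 * BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0), expands to
 *      { HC_REG_HC_PRTY_MASK, HC_REG_HC_PRTY_STS_CLR,
 *        0x7, {0x7, 0x7, 0, 0}, "HC" }
 */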

static const struct {
        u32 mask_addr;
        u32 sts_clr_addr;
        u32 en_mask;            /* Mask to enable parity attentions */
        struct {
                u32 e1;         /* 57710 */
                u32 e1h;        /* 57711 */
                u32 e2;         /* 57712 */
                u32 e3;         /* 578xx */
        } reg_mask;             /* Register mask (all valid bits) */
        char name[8];           /* Block's longest name is 7 characters long
                                 * (name + suffix)
                                 */
} bnx2x_blocks_parity_data[] = {
        /* bit 19 masked */
        /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
        /* bit 5,18,20-31 */
        /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
        /* bit 5 */
        /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
        /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
        /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */

        /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
         * want to handle "system kill" flow at the moment.
         */
        BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
                        0x7ffffff),
        BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
                        0xffffffff),
        BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
        BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
        BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
        BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
        BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff),
        BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
        BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
        BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
        BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
        BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
        BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
        {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
                GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
                {0xf, 0xf, 0xf, 0xf}, "UPB"},
        {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
                GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
                {0xf, 0xf, 0xf, 0xf}, "XPB"},
        BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
        BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
        BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
        BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
        BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
        BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
        BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff),
        BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
        BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
        BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
        BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
        BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
        BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
        BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
        BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
        BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
        BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
        BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
                        0xffffffff),
        BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
        BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
                        0xffffffff),
        BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
        BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
                        0xffffffff),
        BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
        BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
                        0xffffffff),
        BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
};


/* [28] MCP Latched rom_parity
 * [29] MCP Latched ump_rx_parity
 * [30] MCP Latched ump_tx_parity
 * [31] MCP Latched scpad_parity
 */
#define MISC_AEU_ENABLE_MCP_PRTY_BITS   \
        (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
         AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
         AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
         AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)

/* Below registers control the MCP parity attention output. When
 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
 * enabled, when cleared - disabled.
 */
static const u32 mcp_attn_ctl_regs[] = {
        MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
        MISC_REG_AEU_ENABLE4_NIG_0,
        MISC_REG_AEU_ENABLE4_PXP_0,
        MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
        MISC_REG_AEU_ENABLE4_NIG_1,
        MISC_REG_AEU_ENABLE4_PXP_1
};

static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
{
        int i;
        u32 reg_val;

        for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
                reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);

                if (enable)
                        reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
                else
                        reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;

                REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
        }
}

static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
{
        if (CHIP_IS_E1(bp))
                return bnx2x_blocks_parity_data[idx].reg_mask.e1;
        else if (CHIP_IS_E1H(bp))
                return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
        else if (CHIP_IS_E2(bp))
                return bnx2x_blocks_parity_data[idx].reg_mask.e2;
        else /* CHIP_IS_E3 */
                return bnx2x_blocks_parity_data[idx].reg_mask.e3;
}

static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
                u32 dis_mask = bnx2x_parity_reg_mask(bp, i);

                if (dis_mask) {
                        REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
                               dis_mask);
                        DP(NETIF_MSG_HW, "Setting parity mask "
                                         "for %s to\t\t0x%x\n",
                           bnx2x_blocks_parity_data[i].name, dis_mask);
                }
        }

        /* Disable MCP parity attentions */
        bnx2x_set_mcp_parity(bp, false);
}

/* Clear the parity error status registers. */
static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
{
        int i;
        u32 reg_val, mcp_aeu_bits =
                AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
                AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
                AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
                AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;

        /* Clear SEM_FAST parities */
        REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
        REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
        REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
        REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);

        for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
                u32 reg_mask = bnx2x_parity_reg_mask(bp, i);

                if (reg_mask) {
                        reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
                                         sts_clr_addr);
                        if (reg_val & reg_mask)
                                DP(NETIF_MSG_HW,
                                   "Parity errors in %s: 0x%x\n",
                                   bnx2x_blocks_parity_data[i].name,
                                   reg_val & reg_mask);
                }
        }

        /* Check if there were parity attentions in MCP */
        reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
        if (reg_val & mcp_aeu_bits)
                DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
                   reg_val & mcp_aeu_bits);

        /* Clear parity attentions in MCP:
         * [7]  clears Latched rom_parity
         * [8]  clears Latched ump_rx_parity
         * [9]  clears Latched ump_tx_parity
         * [10] clears Latched scpad_parity (both ports)
         */
        REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
}

static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
                u32 reg_mask = bnx2x_parity_reg_mask(bp, i);

                if (reg_mask)
                        REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
                               bnx2x_blocks_parity_data[i].en_mask & reg_mask);
        }

        /* Enable MCP parity attentions */
        bnx2x_set_mcp_parity(bp, true);
}


#endif /* BNX2X_INIT_H */