/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
/* API common to all protocols */
enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* Used for doorbells */
};
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
			   enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->cdev))
		return 1 << 17;

	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);

	/* Old MFW versions initialized the above register only conditionally */
	if (p_hwfn->cdev->num_hwfns > 1) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
	} else {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
}
void qed_init_dp(struct qed_dev *cdev,
		 u32 dp_module, u8 dp_level)
{
	int i;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
	}
}
void qed_init_struct(struct qed_dev *cdev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->cdev = cdev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		mutex_init(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	cdev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 */
	cdev->cache_shift = 7;
}
static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	kfree(qm_info->qm_pq_params);
	qm_info->qm_pq_params = NULL;
	kfree(qm_info->qm_vport_params);
	qm_info->qm_vport_params = NULL;
	kfree(qm_info->qm_port_params);
	qm_info->qm_port_params = NULL;
	kfree(qm_info->wfq_data);
	qm_info->wfq_data = NULL;
}
void qed_resc_free(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		kfree(p_hwfn->p_tx_cids);
		p_hwfn->p_tx_cids = NULL;
		kfree(p_hwfn->p_rx_cids);
		p_hwfn->p_rx_cids = NULL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_eq_free(p_hwfn, p_hwfn->p_eq);
		qed_consq_free(p_hwfn, p_hwfn->p_consq);
		qed_int_free(p_hwfn);
		qed_iov_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
	}
}
static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_port_params *p_qm_port;
	u16 num_pqs, multi_cos_tcs = 1;
	u16 num_vfs = 0;

#ifdef CONFIG_QED_SRIOV
	if (p_hwfn->cdev->p_iov_info)
		num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
#endif
	memset(qm_info, 0, sizeof(*qm_info));

	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

	/* Sanity checking that setup requires legal number of resources */
	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
		DP_ERR(p_hwfn,
		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
		return -EINVAL;
	}

	/* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
	 */
	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
					num_pqs, GFP_KERNEL);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
					   num_vports, GFP_KERNEL);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
					  MAX_NUM_PORTS, GFP_KERNEL);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
				    GFP_KERNEL);
	if (!qm_info->wfq_data)
		goto alloc_err;

	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

	/* First init per-TC PQs */
	for (i = 0; i < multi_cos_tcs; i++, curr_queue++) {
		struct init_qm_pq_params *params =
		    &qm_info->qm_pq_params[curr_queue];

		params->vport_id = vport_id;
		params->tc_id = p_hwfn->hw_info.non_offload_tc;
		params->wrr_group = 1;
	}

	/* Then init pure-LB PQ */
	qm_info->pure_lb_pq = curr_queue;
	qm_info->qm_pq_params[curr_queue].vport_id =
	    (u8) RESC_START(p_hwfn, QED_VPORT);
	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
	curr_queue++;

	qm_info->offload_pq = 0;
	/* Then init per-VF PQs */
	vf_offset = curr_queue;
	for (i = 0; i < num_vfs; i++) {
		/* First vport is used by the PF */
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.non_offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	qm_info->vf_queues_offset = vf_offset;
	qm_info->num_pqs = num_pqs;
	qm_info->num_vports = num_vports;

	/* Initialize qm port parameters */
	num_ports = p_hwfn->cdev->num_ports_in_engines;
	for (i = 0; i < num_ports; i++) {
		p_qm_port = &qm_info->qm_port_params[i];
		p_qm_port->active = 1;
		p_qm_port->num_active_phys_tcs = 4;
		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
	}

	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

	qm_info->num_vf_pqs = num_vfs;
	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);

	for (i = 0; i < qm_info->num_vports; i++)
		qm_info->qm_vport_params[i].vport_wfq = 1;

	qm_info->vport_rl_en = 1;
	qm_info->vport_wfq_en = 1;

	return 0;

alloc_err:
	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
	qed_qm_info_free(p_hwfn);
	return -ENOMEM;
}
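/* Example PQ layout produced above (illustration, not driver code):
 * with multi_cos_tcs == 1 and num_vfs == 2 the qm_pq_params array is
 *   [0] per-TC PQ   (vport_id, non_offload_tc)
 *   [1] pure-LB PQ  (vport_id, PURE_LB_TC)   <- qm_info->pure_lb_pq
 *   [2] VF0 PQ      (vport_id + 1)           <- qm_info->vf_queues_offset
 *   [3] VF1 PQ      (vport_id + 2)
 * i.e. num_pqs == multi_cos_tcs + num_vfs + 1 == 4.
 */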
int qed_resc_alloc(struct qed_dev *cdev)
{
	struct qed_consq *p_consq;
	struct qed_eq *p_eq;
	int i, rc = 0;

	if (IS_VF(cdev))
		return rc;

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	/* Allocate Memory for the Queue->CID mapping */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		int tx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
		int rx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);

		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
		if (!p_hwfn->p_tx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Tx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}

		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
		if (!p_hwfn->p_rx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Rx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Prepare and process QM requirements */
		rc = qed_init_qm_info(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
		if (rc)
			goto alloc_err;

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = qed_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		p_eq = qed_eq_alloc(p_hwfn, 256);
		if (!p_eq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_eq = p_eq;

		p_consq = qed_consq_alloc(p_hwfn);
		if (!p_consq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_consq = p_consq;

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dmae_info structure\n");
			goto alloc_err;
		}
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats) {
		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
		rc = -ENOMEM;
		goto alloc_err;
	}

	return rc;

alloc_err:
	qed_resc_free(cdev);
	return rc;
}
void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
		qed_consq_setup(p_hwfn, p_hwfn->p_consq);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
	}
}
#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	int rc = -EBUSY;

	addr = GTT_BAR0_MAP_REG_USDM_RAM +
		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

	if (is_vf)
		id += 0x10;

	command |= X_FINAL_CLEANUP_AGG_INT <<
		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		msleep(FINAL_CLEANUP_POLL_TIME);

	if (REG_RD(p_hwfn, addr))
		rc = 0;
	else
		DP_NOTICE(p_hwfn,
			  "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}
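/* Sketch of the SDM command word built above (id value illustrative):
 * for PF id 3 the word packs the aggregated-interrupt index, a vector
 * enable bit, the function id as the vector bit, and the completion type:
 *   command = (X_FINAL_CLEANUP_AGG_INT <<
 *              SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT) |
 *             (1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT) |
 *             (3 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT) |
 *             (SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT);
 * The poll budget is FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME
 * = 100 * 10ms, i.e. roughly one second.
 */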
static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
	int hw_mode = 0;

	hw_mode = (1 << MODE_BB_B0);

	switch (p_hwfn->cdev->num_ports_in_engines) {
	case 1:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
		break;
	case 2:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
		break;
	case 4:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
		break;
	default:
		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);
		return;
	}

	switch (p_hwfn->cdev->mf_mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		hw_mode |= 1 << MODE_MF_SI;
		break;
	case QED_MF_OVLAN:
		hw_mode |= 1 << MODE_MF_SD;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		hw_mode |= 1 << MODE_MF_SI;
	}

	hw_mode |= 1 << MODE_ASIC;

	p_hwfn->hw_info.hw_mode = hw_mode;
}
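/* Illustration of the resulting mode bitmap: a single-port, default-MF
 * ASIC would end up with
 *   hw_mode = (1 << MODE_BB_B0) | (1 << MODE_PORTS_PER_ENG_1) |
 *             (1 << MODE_MF_SI) | (1 << MODE_ASIC);
 * which the init tool later matches against per-phase mode masks.
 */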
/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	int i, sb_id;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_igu_info *p_igu_info;
		struct qed_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
		     sb_id++) {
			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
			if (!p_block->is_pf)
				continue;

			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_block->function_id,
					      0, 0);
			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
					 sb_entry);
		}
	}
}
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 concrete_fid;
	u16 vf_id;
	int rc = 0;

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.vport_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);

	qed_cxt_hw_init_common(p_hwfn);

	/* Close gate from NIG to BRB/Storm; By default they are open, but
	 * we close them to prevent NIG from passing data to reset blocks.
	 * Should have been done in the ENGINE phase, but init-tool lacks
	 * proper port-pretend capabilities.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_unpretend(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc != 0)
		return rc;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	/* Disable relaxed ordering in the PCI config space */
	qed_wr(p_hwfn, p_ptt, 0x20b4,
	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

	for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
	}
	/* pretend to original PF */
	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

	return rc;
}
static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    int hw_mode)
{
	int rc = 0;

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
			  hw_mode);
	return rc;
}
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_tunn_start_params *p_tunn,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}

	qed_cxt_hw_init_pf(p_hwfn);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & (1 << MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & (1 << MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Cleanup chip from previous driver if such remains exist */
	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
	if (rc != 0)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW  */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
				     allow_npar_tx_switch);
		if (rc)
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
	}
	return rc;
}
static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u8 enable)
{
	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

	/* Change PF in PXP */
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

	/* wait until value is set - try for 1 second every 50us */
	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
		val = qed_rd(p_hwfn, p_ptt,
			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
		if (val == set_val)
			break;

		usleep_range(50, 60);
	}

	if (val != set_val) {
		DP_NOTICE(p_hwfn,
			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
		return -EAGAIN;
	}

	return 0;
}
static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_main_ptt)
{
	/* Read shadow of current MFW mailbox */
	qed_mcp_read_mb(p_hwfn, p_main_ptt);
	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
	       p_hwfn->mcp_info->mfw_mb_cur,
	       p_hwfn->mcp_info->mfw_mb_length);
}
int qed_hw_init(struct qed_dev *cdev,
		struct qed_tunn_start_params *p_tunn,
		bool b_hw_start,
		enum qed_int_mode int_mode,
		bool allow_npar_tx_switch,
		const u8 *bin_fw_data)
{
	u32 load_code, param;
	int rc, mfw_rc, i;

	if (IS_PF(cdev)) {
		rc = qed_init_fw_data(cdev, bin_fw_data);
		if (rc != 0)
			return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			p_hwfn->b_int_enabled = 1;
			continue;
		}

		/* Enable DMAE in PXP */
		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

		qed_calc_hw_mode(p_hwfn);

		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
				      &load_code);
		if (rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
			return rc;
		}

		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
			   rc, load_code);

		p_hwfn->first_on_engine = (load_code ==
					   FW_MSG_CODE_DRV_LOAD_ENGINE);

		switch (load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
						p_hwfn->hw_info.hw_mode);
			if (rc)
				break;
		/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_PORT:
			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
					      p_hwfn->hw_info.hw_mode);
			if (rc)
				break;

		/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
					    p_tunn, p_hwfn->hw_info.hw_mode,
					    b_hw_start, int_mode,
					    allow_npar_tx_switch);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		if (rc)
			DP_NOTICE(p_hwfn,
				  "init phase failed for loadcode 0x%x (rc %d)\n",
				  load_code, rc);

		/* ACK mfw regardless of success or failure of initialization */
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_LOAD_DONE,
				     0, &load_code, &param);
		if (rc)
			return rc;
		if (mfw_rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
			return mfw_rc;
		}

		p_hwfn->hw_init_done = true;
	}

	return 0;
}
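/* The load-code switch above is a deliberate fall-through ladder: the
 * first PF on the engine (FW_MSG_CODE_DRV_LOAD_ENGINE) runs the ENGINE,
 * PORT and PF init phases; the first PF on a port runs PORT and PF;
 * every other PF runs only the PF phase. Each level therefore falls
 * into the next case on success.
 */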
#define QED_HW_STOP_RETRY_LIMIT (10)
static inline void qed_hw_timers_stop(struct qed_dev *cdev,
				      struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt)
{
	int i;

	/* close timers */
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);

	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
		if ((!qed_rd(p_hwfn, p_ptt,
			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
		    (!qed_rd(p_hwfn, p_ptt,
			     TM_REG_PF_SCAN_ACTIVE_TASK)))
			break;

		/* Dependent on number of connection/tasks, possibly
		 * 1ms sleep is required between polls
		 */
		usleep_range(1000, 2000);
	}

	if (i < QED_HW_STOP_RETRY_LIMIT)
		return;

	DP_NOTICE(p_hwfn,
		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
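/* Worst-case wait in the loop above (illustration): QED_HW_STOP_RETRY_LIMIT
 * polls of usleep_range(1000, 2000) give roughly 10-20ms for the timer
 * block's linear scans to drain before the notice is printed.
 */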
void qed_hw_timers_stop_all(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
	}
}
int qed_hw_stop(struct qed_dev *cdev)
{
	int rc = 0, t_rc;
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			continue;
		}

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		rc = qed_sp_pf_stop(p_hwfn);
		if (rc)
			DP_NOTICE(p_hwfn,
				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}

	if (IS_PF(cdev)) {
		/* Disable DMAE in PXP - in CMT, this should only be done for
		 * first hw-function, and only after all transactions have
		 * stopped for all active hw-functions.
		 */
		t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
					   cdev->hwfns[0].p_main_ptt, false);
		if (t_rc != 0)
			rc = t_rc;
	}

	return rc;
}
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			continue;
		}

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN,
			   "Shutting down the fastpath\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}
}
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
	if (IS_VF(p_hwfn->cdev))
		return;

	/* Re-open incoming traffic */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
static int qed_reg_assert(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt, u32 reg,
			  u32 expected)
{
	u32 assert_val = qed_rd(hwfn, ptt, reg);

	if (assert_val != expected) {
		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
			  reg, expected);
		return -EINVAL;
	}

	return 0;
}
int qed_hw_reset(struct qed_dev *cdev)
{
	int rc = 0;
	u32 unload_resp, unload_param;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_reset(p_hwfn);
			if (rc)
				return rc;
			continue;
		}

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

		/* Check for incorrect states */
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_TX, 0);
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_OTHER, 0);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       TCFC_REG_STRONG_ENABLE_PF, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       CCFC_REG_STRONG_ENABLE_PF, 0);

		/* Send unload command to MCP */
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_REQ,
				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
				 &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
		}

		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_DONE,
				 0, &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
			return rc;
		}
	}

	return rc;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	qed_ptt_pool_free(p_hwfn);
	kfree(p_hwfn->hw_info.p_igu_info);
}
/* Setup bar access */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	/* clear indirect access */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

	/* Clean previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
	       1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
static void get_function_id(struct qed_hwfn *p_hwfn)
{
	/* ME Register */
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);

	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				      PXP_CONCRETE_FID_PFID);
	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				    PXP_CONCRETE_FID_PORT);
}
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	int num_features = 1;

	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
						num_features,
					RESC_NUM(p_hwfn, QED_L2_QUEUE));
	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
		   num_features);
}
static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
	u32 *resc_start = p_hwfn->hw_info.resc_start;
	u8 num_funcs = p_hwfn->num_funcs_on_engine;
	u32 *resc_num = p_hwfn->hw_info.resc_num;
	struct qed_sb_cnt_info sb_cnt_info;
	int i, max_vf_vlan_filters;

	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));

#ifdef CONFIG_QED_SRIOV
	max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
#else
	max_vf_vlan_filters = 0;
#endif

	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);

	resc_num[QED_SB] = min_t(u32,
				 (MAX_SB_PER_PATH_BB / num_funcs),
				 sb_cnt_info.sb_cnt);
	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
	resc_num[QED_RL] = 8;
	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
			     num_funcs;
	resc_num[QED_ILT] = 950;

	for (i = 0; i < QED_MAX_RESC; i++)
		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

	qed_hw_set_feat(p_hwfn);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "The numbers for each resource are:\n"
		   "SB = %d start = %d\n"
		   "L2_QUEUE = %d start = %d\n"
		   "VPORT = %d start = %d\n"
		   "PQ = %d start = %d\n"
		   "RL = %d start = %d\n"
		   "MAC = %d start = %d\n"
		   "VLAN = %d start = %d\n"
		   "ILT = %d start = %d\n",
		   p_hwfn->hw_info.resc_num[QED_SB],
		   p_hwfn->hw_info.resc_start[QED_SB],
		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_num[QED_VPORT],
		   p_hwfn->hw_info.resc_start[QED_VPORT],
		   p_hwfn->hw_info.resc_num[QED_PQ],
		   p_hwfn->hw_info.resc_start[QED_PQ],
		   p_hwfn->hw_info.resc_num[QED_RL],
		   p_hwfn->hw_info.resc_start[QED_RL],
		   p_hwfn->hw_info.resc_num[QED_MAC],
		   p_hwfn->hw_info.resc_start[QED_MAC],
		   p_hwfn->hw_info.resc_num[QED_VLAN],
		   p_hwfn->hw_info.resc_start[QED_VLAN],
		   p_hwfn->hw_info.resc_num[QED_ILT],
		   p_hwfn->hw_info.resc_start[QED_ILT]);
}
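/* Illustration of the even split above (constant value assumed for the
 * example): with num_funcs == 2 and MAX_NUM_L2_QUEUES_BB == 128, each
 * PF gets resc_num[QED_L2_QUEUE] == 64, and PF1's range starts at
 * resc_start == 64 * rel_pf_id == 64.
 */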
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (Notice this is just an offset, not an offsize (TBD)) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
			  core_cfg);
		break;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link->speed.advertised_speeds =
		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
		link->speed.advertised_speeds;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
			  link_temp);
	}

	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	/* Read device capabilities information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 reg_function_hide, tmp, eng_mask;
	u8 num_funcs;

	num_funcs = MAX_NUM_PFS_BB;

	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and
	 * enabled).
	 * In case of CMT, only the "even" functions are enabled, and thus the
	 * number of functions for both hwfns is learnt from the same bits.
	 */
	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);

	if (reg_function_hide & 0x1) {
		if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
			num_funcs = 0;
			eng_mask = 0xaaaa;
		} else {
			num_funcs = 1;
			eng_mask = 0x5554;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}
	}

	p_hwfn->num_funcs_on_engine = num_funcs;

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
		   p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
		   p_hwfn->num_funcs_on_engine);
}
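/* Example of the hide-register decoding (values illustrative): with
 * reg_function_hide == 0x8001, bit 0 says the bypass values are valid,
 * so (0x8001 ^ 0xffffffff) & 0x5554 == 0x5554, which has 7 set bits;
 * the loop counts them on top of the always-enabled function 0, giving
 * num_funcs == 8.
 */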
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		enum qed_pci_personality personality)
{
	u32 port_mode;
	int rc;

	/* Since all information is common, only first hwfns should do this */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_iov_hw_info(p_hwfn);
		if (rc)
			return rc;
	}

	/* Read the port mode */
	port_mode = qed_rd(p_hwfn, p_ptt,
			   CNIG_REG_NW_PORT_MODE_BB_B0);

	if (port_mode < 3) {
		p_hwfn->cdev->num_ports_in_engines = 1;
	} else if (port_mode <= 5) {
		p_hwfn->cdev->num_ports_in_engines = 2;
	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);

		/* Default num_ports_in_engines to something */
		p_hwfn->cdev->num_ports_in_engines = 1;
	}

	qed_hw_get_nvm_info(p_hwfn, p_ptt);

	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (qed_mcp_is_init(p_hwfn))
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

	if (qed_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
				p_hwfn->mcp_info->func_info.ovlan;

		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}

	if (qed_mcp_is_init(p_hwfn)) {
		enum qed_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

	qed_get_num_funcs(p_hwfn, p_ptt);

	qed_hw_get_resc(p_hwfn);

	return rc;
}
static int qed_get_dev_info(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 tmp;

	/* Read Vendor Id / Device Id */
	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
			     &cdev->vendor_id);
	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
			     &cdev->device_id);
	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_NUM);
	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_REV);
	MASK_FIELD(CHIP_REV, cdev->chip_rev);

	cdev->type = QED_DEV_TYPE_BB;
	/* Learn number of HW-functions */
	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
		     MISCS_REG_CMT_ENABLED_FOR_PAIR);

	if (tmp & (1 << p_hwfn->rel_pf_id)) {
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
		cdev->num_hwfns = 2;
	} else {
		cdev->num_hwfns = 1;
	}

	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				    MISCS_REG_CHIP_TEST_REG) >> 4;
	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				       MISCS_REG_CHIP_METAL);
	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

	DP_INFO(cdev->hwfns,
		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
		cdev->chip_num, cdev->chip_rev,
		cdev->chip_bond_id, cdev->chip_metal);

	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
		DP_NOTICE(cdev->hwfns,
			  "The chip type/rev (BB A0) is not supported!\n");
		return -EINVAL;
	}

	return 0;
}
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 enum qed_pci_personality personality)
{
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_prepare(p_hwfn);

	/* Validate that chip access is feasible */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
		goto err0;
	}

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id) {
		rc = qed_get_dev_info(p_hwfn->cdev);
		if (rc != 0)
			goto err1;
	}

	qed_hw_hwfn_prepare(p_hwfn);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
		goto err2;
	}

	return rc;
err2:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_iov_free_hw_info(p_hwfn->cdev);
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}
int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	if (IS_PF(cdev))
		qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells, personality);
	if (rc)
		return rc;

	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u8 __iomem *addr;

		/* adjust bar offset for second engine */
		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
		p_regview = addr;

		/* adjust doorbell bar offset for second engine */
		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
		p_doorbell = addr;

		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, personality);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc) {
			if (IS_PF(cdev)) {
				qed_init_free(p_hwfn);
				qed_mcp_free(p_hwfn);
				qed_hw_hwfn_free(p_hwfn);
			}
		}
	}

	return rc;
}
void qed_hw_remove(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			qed_vf_pf_release(p_hwfn);
			continue;
		}

		qed_init_free(p_hwfn);
		qed_hw_hwfn_free(p_hwfn);
		qed_mcp_free(p_hwfn);
	}

	qed_iov_free_hw_info(cdev);
}
int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    u16 num_elems,
		    size_t elem_size,
		    struct qed_chain *p_chain)
{
	dma_addr_t p_pbl_phys = 0;
	void *p_pbl_virt = NULL;
	dma_addr_t p_phys = 0;
	void *p_virt = NULL;
	u16 page_cnt = 0;
	size_t size;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	size = page_cnt * QED_CHAIN_PAGE_SIZE;
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    size, &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
		goto nomem;
	}

	if (mode == QED_CHAIN_MODE_PBL) {
		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
						size, &p_pbl_phys,
						GFP_KERNEL);
		if (!p_pbl_virt) {
			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
			goto nomem;
		}

		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
				   (u8)elem_size, intended_use,
				   p_pbl_phys, p_pbl_virt);
	} else {
		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
			       (u8)elem_size, intended_use, mode);
	}

	return 0;

nomem:
	dma_free_coherent(&cdev->pdev->dev,
			  page_cnt * QED_CHAIN_PAGE_SIZE,
			  p_virt, p_phys);
	dma_free_coherent(&cdev->pdev->dev,
			  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
			  p_pbl_virt, p_pbl_phys);

	return -ENOMEM;
}
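/* Usage sketch (illustrative only; element type chosen for the example,
 * mirroring how ring allocations in this driver look):
 *
 *	struct qed_chain chain;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, 256,
 *			     sizeof(union event_ring_element), &chain);
 *	if (!rc)
 *		qed_chain_free(cdev, &chain);
 */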
void qed_chain_free(struct qed_dev *cdev,
		    struct qed_chain *p_chain)
{
	size_t size;

	if (!p_chain->p_virt_addr)
		return;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
		dma_free_coherent(&cdev->pdev->dev, size,
				  p_chain->pbl.p_virt_table,
				  p_chain->pbl.p_phys_table);
	}

	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
	dma_free_coherent(&cdev->pdev->dev, size,
			  p_chain->p_virt_addr,
			  p_chain->p_phys_addr);
}
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
		    u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		u16 min, max;

		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
		DP_NOTICE(p_hwfn,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

	return 0;
}
int qed_fw_vport(struct qed_hwfn *p_hwfn,
		 u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_VPORT);
		max = min + RESC_NUM(p_hwfn, QED_VPORT);
		DP_NOTICE(p_hwfn,
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

	return 0;
}
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
		   u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
		DP_NOTICE(p_hwfn,
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

	return 0;
}
/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have an
 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
 */
static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
						min_pf_rate;
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}
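/* Worked example of the formula above (numbers illustrative): with
 * min_pf_rate == 10000 Mbps, QED_WFQ_UNIT == 100 and a vport whose
 * wfq_data min_speed == 2500 Mbps, vport_wfq = (2500 * 100) / 10000
 * = 25, which maps back to approx 10000 * (25 / 100) = 2500 Mbps.
 */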
static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)
{
	int i;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
}
static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}
/* This function performs several validations for WFQ
 * configuration and the required min rate for a given vport:
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
 *    rates to get less than one percent of min_pf_rate.
 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
 */
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
			      u16 vport_id, u32 req_rate,
			      u32 min_pf_rate)
{
	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
	int non_requested_count = 0, req_count = 0, i, num_vports;

	num_vports = p_hwfn->qm_info.num_vports;

	/* Accounting for the vports which are configured for WFQ explicitly */
	for (i = 0; i < num_vports; i++) {
		u32 tmp_speed;

		if ((i != vport_id) &&
		    p_hwfn->qm_info.wfq_data[i].configured) {
			req_count++;
			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
			total_req_min_rate += tmp_speed;
		}
	}

	/* Include current vport data as well */
	req_count++;
	total_req_min_rate += req_rate;
	non_requested_count = num_vports - req_count;

	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   vport_id, req_rate, min_pf_rate);
		return -EINVAL;
	}

	if (num_vports > QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Number of vports is greater than %d\n",
			   QED_WFQ_UNIT);
		return -EINVAL;
	}

	if (total_req_min_rate > min_pf_rate) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
			   total_req_min_rate, min_pf_rate);
		return -EINVAL;
	}

	total_left_rate = min_pf_rate - total_req_min_rate;

	left_rate_per_vp = total_left_rate / non_requested_count;
	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   left_rate_per_vp, min_pf_rate);
		return -EINVAL;
	}

	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
	p_hwfn->qm_info.wfq_data[vport_id].configured = true;

	for (i = 0; i < num_vports; i++) {
		if (p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
	}

	return 0;
}
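/* Numeric sketch of the checks above (values illustrative): with
 * min_pf_rate == 10000 Mbps, 4 vports, and one vport requesting
 * 6000 Mbps, the remaining 4000 Mbps splits into 4000 / 3 = 1333 Mbps
 * per unconfigured vport, which clears the one-percent (100 Mbps)
 * floor; a request of 9900 Mbps would instead fail check 2.
 */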
static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
{
	struct qed_mcp_link_state *p_link;
	int rc = 0;

	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;

	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return rc;
	}

	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

	if (rc == 0)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);
	else
		DP_NOTICE(p_hwfn,
			  "Validation failed while configuring min rate\n");

	return rc;
}
static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 min_pf_rate)
{
	bool use_wfq = false;
	int rc = 0;
	u16 i;

	/* Validate all pre configured vports for wfq */
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 rate;

		if (!p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
		use_wfq = true;

		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (!rc && use_wfq)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

	return rc;
}
/* Main API for qed clients to configure vport min rate.
 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
 * rate - Speed in Mbps needs to be assigned to a given vport.
 */
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
{
	int i, rc = -EINVAL;

	/* Currently not supported; Might change in future */
	if (cdev->num_hwfns > 1) {
		DP_NOTICE(cdev,
			  "WFQ configuration is not supported for this device\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
/* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		__qed_configure_vp_wfq_on_link_change(p_hwfn,
						      p_hwfn->p_dpc_ptt,
						      min_pf_rate);
	}
}
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			    p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}
/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
{
	int i, rc = -EINVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
						      p_link, max_bw);

		qed_ptt_release(p_hwfn, p_ptt);

		if (rc)
			break;
	}

	return rc;
}
int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}
/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
	int i, rc = -EINVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
						      p_link, min_bw);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
								   p_ptt,
								   min_rate);
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
					       p_link->min_pf_rate);

	memset(p_hwfn->qm_info.wfq_data, 0,
	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}