/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"

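/* The VF converses with the PF over a TLV-based mailbox: requests are built
 * in a DMA-coherent buffer, the PF is triggered through the VF's USDM zone,
 * and the reply is polled for in a second DMA buffer. A third DMA region,
 * the bulletin board, carries unsolicited PF-to-VF state (link, forced MAC).
 */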
static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in qed_send_msg2pf().
	 * So, qed_vf_pf_prep() and qed_send_msg2pf()
	 * must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

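/* Posts the prepared request over the HW channel: write the request's DMA
 * address into the VF zone, ring the trigger doorbell, then poll the `done'
 * byte until the PF writes its response. Also releases the mutex taken in
 * qed_vf_pf_prep().
 */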
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = 0, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When PF would be done with the response, it would write back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		msleep(25);
		time--;
	}

	if (!*done) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF <-- PF Timeout [Type %d]\n",
			   p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "PF response: %d [Type %d]\n",
			   *done, p_req->first_tlv.tl.type);
	}

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));

	return rc;
}

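/* Resource negotiation: should the PF reject the requested amounts with
 * PFVF_STATUS_NO_RESOURCE, the VF lowers its request to the PF-recommended
 * values and retries the acquire, up to VF_ACQUIRE_THRESH attempts.
 */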
#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}

static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
		if (rc)
			return rc;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				DP_INFO(p_hwfn,
					"PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
				return -EINVAL;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);

			/* Clear response buffer */
			memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
		} else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
			   pfdev_info->major_fp_hsi &&
			   (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
			DP_NOTICE(p_hwfn,
				  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
				  pfdev_info->major_fp_hsi,
				  pfdev_info->minor_fp_hsi,
				  ETH_HSI_VER_MAJOR,
				  ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
			return -EINVAL;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			return -EAGAIN;
		}
	}

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

	return 0;
}

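/* One-time VF init: map the doorbell BAR, read the opaque/concrete FIDs from
 * the ME registers, allocate the mailbox and bulletin DMA buffers, then run
 * the acquire negotiation against the PF.
 */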
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
			    PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
		return -ENOMEM;
	}

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request) {
		DP_NOTICE(p_hwfn,
			  "Failed to allocate `vf2pf_request' DMA memory\n");
		goto free_p_iov;
	}

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply) {
		DP_NOTICE(p_hwfn,
			  "Failed to allocate `pf2vf_reply' DMA memory\n");
		goto free_vf2pf_request;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	return qed_vf_pf_acquire(p_hwfn);

free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}

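/* Queue-start requests hand the VF's BD-chain and CQE-PBL DMA addresses to
 * the PF; a successful response carries the offset of the Rx producer (or,
 * for Tx, the doorbell) within the VF's BARs.
 */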
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			u8 rx_qid,
			u16 sb,
			u8 sb_index,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = sb;
	req->sb_index = sb_index;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	/* Learn the address of the producer from the response */
	if (pp_prod) {
		u64 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
				  (u32 *)&init_prod_val);
	}

	return rc;
}

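/* When stopping an Rx queue, cqe_completion asks the PF to post a completion
 * on the CQE ring for the stopped queue, presumably so the caller can drain
 * the ring before freeing it.
 */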
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = rx_qid;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
			u16 tx_queue_id,
			u16 sb,
			u8 sb_index,
			dma_addr_t pbl_addr,
			u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = tx_queue_id;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = sb;
	req->sb_index = sb_index;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the doorbell from the response */
	*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
		   tx_queue_id, *pp_doorbell, resp->offset);

exit:
	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = tx_qid;
	req->num_txqs = 1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

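/* Each vport-update parameter travels in its own extended TLV. The helpers
 * below decide which TLVs a given request needs, and later match the PF's
 * per-TLV responses back against the request.
 */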
static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
			tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}

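/* Builds a single vport-update message aggregating one extended TLV per
 * changed parameter (activate, tx-switching, mcast bins, accept flags, RSS,
 * accept-any-vlan); the expected response size grows by one default-response
 * TLV for every extension added.
 */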
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
		memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
		       sizeof(rss_params->rss_ind_table));
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

	return rc;
}

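/* CHANNEL_TLV_CLOSE notifies the PF that the VF is closing down; afterwards
 * the VF regards its interrupts as disabled.
 */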
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EAGAIN;

	p_hwfn->b_int_enabled = 0;

	return 0;
}

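/* Release undoes qed_vf_hw_prepare(): notify the PF, then free the mailbox
 * and bulletin DMA buffers and the iov context itself.
 */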
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, sp_params.bins);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EAGAIN;

	return 0;
}

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

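/* The PF publishes its state to the VF through the bulletin board. The VF
 * copies the board out and accepts the copy only if the version changed and
 * the CRC32 checks out, which guards against reading a board the PF is
 * mid-way through writing.
 */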
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (ether_addr_equal(bulletin->mac, mac))
		return true;

	return false;
}

bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
				    u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;

	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && is_mac_forced && cookie)
		ops->force_mac(cookie, mac);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}

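/* Since the VF receives no interrupt for bulletin updates, this delayed work
 * polls the board and re-arms itself every second (HZ) until
 * QED_IOV_WQ_STOP_WQ_FLAG is set.
 */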
void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}