/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
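/* qed_sp_vf_start() posts a VF_START ramrod on the slow-path queue. Note the
 * fastpath-HSI handling below: a VF requesting a slightly newer minor version
 * than the PF supports is silently clamped to the PF's minor, which is
 * presumably what keeps forward-compatible VFs loadable against an older PF.
 */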
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 fp_minor;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id, bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}
static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf, u16 rx_qid)
{
	if (rx_qid >= p_vf->num_rxqs)
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
	return rx_qid < p_vf->num_rxqs;
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf, u16 tx_qid)
{
	if (tx_qid >= p_vf->num_txqs)
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
	return tx_qid < p_vf->num_txqs;
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}
int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
			     int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}
static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	}
}
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}
static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov)
		return -ENOMEM;

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)
		return -ENOMEM;

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* Calculate the first VF index - this is a bit tricky; Basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
					   p_hwfn->abs_pf_id - 16;
	if (QED_PATH_ID(p_hwfn))
		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}
static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
		return false;

	return true;
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}
static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}
/**
 * @brief qed_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}
static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
			       val);

			/* Configure igu sb in CAU which were marked valid */
			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_hwfn->rel_pf_id,
					      vf->abs_vf_id, 1);
			qed_dmae_host2grc(p_hwfn, p_ptt,
					  (u64)(uintptr_t)&sb_entry,
					  CAU_REG_SB_VAR_MEMORY +
					  igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}
static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    QED_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u16 rel_vf_id, u16 num_rx_queues)
{
	u8 num_of_vf_available_chains = 0;
	struct qed_vf_info *vf = NULL;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
		return -EINVAL;
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, num_rx_queues, (u16) cids);
	num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));

	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							      p_ptt,
							      vf,
							      num_rx_queues);
	if (!num_of_vf_available_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
							   vf->igu_sbs[i]);

		if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
			DP_NOTICE(p_hwfn,
				  "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
				  vf->relative_vf_id, queue_id);
			return -EINVAL;
		}

		/* CIDs are per-VF, so no problem having them 0-based. */
		vf->vf_queues[i].fw_rx_qid = queue_id;
		vf->vf_queues[i].fw_tx_qid = queue_id;
		vf->vf_queues[i].fw_cid = i;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
	}
	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}
static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}
static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);

	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}
static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}
static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}
*p_hwfn
,
1118 struct qed_ptt
*p_ptt
,
1119 struct qed_vf_info
*vf_info
,
1120 u16 type
, u16 length
, u8 status
)
1122 struct qed_iov_vf_mbx
*mbx
= &vf_info
->vf_mbx
;
1124 mbx
->offset
= (u8
*)mbx
->reply_virt
;
1126 qed_add_tlv(p_hwfn
, &mbx
->offset
, type
, length
);
1127 qed_add_tlv(p_hwfn
, &mbx
->offset
, CHANNEL_TLV_LIST_END
,
1128 sizeof(struct channel_list_end_tlv
));
1130 qed_iov_send_response(p_hwfn
, p_ptt
, vf_info
, length
, status
);
1133 struct qed_public_vf_info
*qed_iov_get_public_vf_info(struct qed_hwfn
*p_hwfn
,
1135 bool b_enabled_only
)
1137 struct qed_vf_info
*vf
= NULL
;
1139 vf
= qed_iov_get_vf_info(p_hwfn
, relative_vf_id
, b_enabled_only
);
1143 return &vf
->p_vf_info
;
1146 void qed_iov_clean_vf(struct qed_hwfn
*p_hwfn
, u8 vfid
)
1148 struct qed_public_vf_info
*vf_info
;
1150 vf_info
= qed_iov_get_public_vf_info(p_hwfn
, vfid
, false);
1155 /* Clear the VF mac */
1156 memset(vf_info
->mac
, 0, ETH_ALEN
);
static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
		p_vf->vf_queues[i].rxq_active = 0;

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	int i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_txqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}
static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this field.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}
static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc = 0;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return rc;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}
static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if (events & BIT(MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			u16 qid;

			if (!p_vf->vf_queues[i].rxq_active)
				continue;

			qid = p_vf->vf_queues[i].fw_rx_qid;

			rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  qid);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	int rc;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
	}
	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;
	params.check_mac = true;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
	if (rc) {
		DP_ERR(p_hwfn,
		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	int rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc) {
		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
		       rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *vf,
					  u8 status, bool b_legacy)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				offsetof(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	struct vfpf_start_rxq_tlv *req;
	bool b_legacy_vf = false;
	int rc;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_rxq;

	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
	params.vf_qid = req->rx_qid;
	params.vport_id = vf->vport_id;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	/* Legacy VFs have their Producers in a different location, which they
	 * calculate on their own and clean the producer prior to this.
	 */
	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		b_legacy_vf = true;
	} else {
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
		       0);
	}

	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
					 vf->vf_queues[req->rx_qid].fw_cid,
					 &params,
					 vf->abs_vf_id + 0x10,
					 req->bd_max_bytes,
					 req->rxq_addr,
					 req->cqe_pbl_addr, req->cqe_pbl_size,
					 b_legacy_vf);

	if (rc) {
		status = PFVF_STATUS_FAILURE;
	} else {
		status = PFVF_STATUS_SUCCESS;
		vf->vf_queues[req->rx_qid].rxq_active = true;
		vf->num_active_rxqs++;
	}

out:
	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
}
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *p_vf, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	bool b_legacy = false;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		b_legacy = true;

	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		u16 qid = mbx->req_virt->start_txq.tx_qid;

		p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
					    DQ_DEMS_LEGACY);
	}

	qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	union qed_qm_pq_params pq_params;
	struct vfpf_start_txq_tlv *req;
	int rc;

	/* Prepare the parameters which would choose the right PQ */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.eth.is_vf = 1;
	pq_params.eth.vf_id = vf->relative_vf_id;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_txq;

	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
	params.vport_id = vf->vport_id;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
					 vf->opaque_fid,
					 vf->vf_queues[req->tx_qid].fw_cid,
					 &params,
					 vf->abs_vf_id + 0x10,
					 req->pbl_addr,
					 req->pbl_size, &pq_params);

	if (rc) {
		status = PFVF_STATUS_FAILURE;
	} else {
		status = PFVF_STATUS_SUCCESS;
		vf->vf_queues[req->tx_qid].txq_active = true;
	}

out:
	qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
}
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
{
	int rc = 0;
	int qid;

	if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
		return -EINVAL;

	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
		if (vf->vf_queues[qid].rxq_active) {
			rc = qed_sp_eth_rx_queue_stop(p_hwfn,
						      vf->vf_queues[qid].fw_rx_qid,
						      false, cqe_completion);

			if (rc)
				return rc;
		}
		vf->vf_queues[qid].rxq_active = false;
		vf->num_active_rxqs--;
	}

	return rc;
}

static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
{
	int rc = 0;
	int qid;

	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
		return -EINVAL;

	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
		if (vf->vf_queues[qid].txq_active) {
			rc = qed_sp_eth_tx_queue_stop(p_hwfn,
						      vf->vf_queues[qid].fw_tx_qid);

			if (rc)
				return rc;
		}
		vf->vf_queues[qid].txq_active = false;
	}

	return rc;
}
static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_rxqs_tlv *req;
	int rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_rxqs;
	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				  req->num_rxqs, req->cqe_completion);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			     length, status);
}

static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_txqs_tlv *req;
	int rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_txqs;
	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			     length, status);
}
static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u16 qid;
	int rc;
	u8 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	for (i = 0; i < req->num_rxqs; i++) {
		qid = req->rx_qid + i;

		if (!vf->vf_queues[qid].rxq_active) {
			DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n",
				  qid);
			status = PFVF_STATUS_FAILURE;
			break;
		}

		rc = qed_sp_eth_rx_queues_update(p_hwfn,
						 vf->vf_queues[qid].fw_rx_qid,
						 1,
						 complete_cqe_flg,
						 complete_event_flg,
						 QED_SPQ_MODE_EBLOCK, NULL);

		if (rc) {
			status = PFVF_STATUS_FAILURE;
			break;
		}
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			     length, status);
}
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
			return NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Extended tlv type %d, length %d found\n",
				   p_tlv->type, p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
			return NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return NULL;
}
2081 qed_iov_vp_update_act_param(struct qed_hwfn
*p_hwfn
,
2082 struct qed_sp_vport_update_params
*p_data
,
2083 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2085 struct vfpf_vport_update_activate_tlv
*p_act_tlv
;
2086 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_ACTIVATE
;
2088 p_act_tlv
= (struct vfpf_vport_update_activate_tlv
*)
2089 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2093 p_data
->update_vport_active_rx_flg
= p_act_tlv
->update_rx
;
2094 p_data
->vport_active_rx_flg
= p_act_tlv
->active_rx
;
2095 p_data
->update_vport_active_tx_flg
= p_act_tlv
->update_tx
;
2096 p_data
->vport_active_tx_flg
= p_act_tlv
->active_tx
;
2097 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_ACTIVATE
;
static void
qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
			     struct qed_sp_vport_update_params *p_data,
			     struct qed_vf_info *p_vf,
			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
}
static void
qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						   tlv);
	if (!p_tx_switch_tlv)
		return;

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
}
static void
qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
		      qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	memcpy(p_data->bins, p_mcast_tlv->bins,
	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}
static void
qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_update_params *p_data,
			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
		       qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
}
static void
qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						     tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
	    p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}
static void
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
			    struct qed_vf_info *vf,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_rss_params *p_rss,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	u16 i, q_idx, max_q_idx;
	u16 table_size;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = NULL;
		return;
	}

	memset(p_rss, 0, sizeof(struct qed_rss_params));

	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
				      VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
					    VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
				   VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
	       sizeof(p_rss->rss_ind_table));
	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));

	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
			   (1 << p_rss_tlv->rss_table_size_log));

	max_q_idx = ARRAY_SIZE(vf->vf_queues);

	for (i = 0; i < table_size; i++) {
		u16 index = vf->vf_queues[0].fw_rx_qid;

		q_idx = p_rss->rss_ind_table[i];
		if (q_idx >= max_q_idx)
			DP_NOTICE(p_hwfn,
				  "rss_ind_table[%d] = %d, rxq is out of range\n",
				  i, q_idx);
		else if (!vf->vf_queues[q_idx].rxq_active)
			DP_NOTICE(p_hwfn,
				  "rss_ind_table[%d] = %d, rxq is not active\n",
				  i, q_idx);
		else
			index = vf->vf_queues[q_idx].fw_rx_qid;
		p_rss->rss_ind_table[i] = index;
	}

	p_data->rss_params = p_rss;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
}
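
/* The indirection-table fix-up above translates VF-relative queue indices
 * into absolute fw_rx_qid values. Entries that are out of range or point at
 * an inactive rxq are only warned about and silently fall back to queue 0's
 * fw id, so a misbehaving VF cannot steer RSS traffic outside its own
 * queues.
 */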
static void
qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				struct qed_sp_vport_update_params *p_data,
				struct qed_sge_tpa_params *p_sge_tpa,
				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
			qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = NULL;
		return;
	}

	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}
static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_sp_vport_update_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct qed_sge_tpa_params sge_tpa_params;
	struct qed_rss_params rss_params;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 tlvs_mask = 0;
	u16 length;
	int rc;

	/* Validate the VF can send such a request */
	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing vport update\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	memset(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct qed_sp_vport_update_params.
	 */
	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
				    mbx, &tlvs_mask);
	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					&sge_tpa_params, mbx, &tlvs_mask);

	/* Just log a message if there isn't a single extended TLV in the
	 * buffer. Once all vport-update features are requested by VFs as
	 * extended TLVs, an empty buffer can be treated as an error in the
	 * response.
	 */
	if (!tlvs_mask) {
		DP_NOTICE(p_hwfn,
			  "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);

	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						  tlvs_mask, tlvs_mask);
	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
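
/* As the helper's name suggests, qed_iov_prep_vp_update_resp_tlvs() builds a
 * per-feature response from tlvs_mask; since this PF processes every TLV it
 * recognizes, the same mask is passed both as the "handled" and "accepted"
 * set.
 */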
static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
					 struct qed_vf_info *p_vf,
					 struct qed_filter_ucast *p_params)
{
	int i;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}
		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing vlan\n",
				   p_vf->relative_vf_id);
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
		return 0;

	if (p_params->opcode == QED_FILTER_ADD ||
	    p_params->opcode == QED_FILTER_REPLACE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;
			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d vlan filters\n",
				   p_vf->relative_vf_id,
				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
			return -EINVAL;
		}
	}

	return 0;
}
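
/* The shadow VLAN/MAC configuration maintained here records what the VF has
 * asked for independently of what is currently applied, presumably so the
 * PF can re-play the VF's filters later, e.g. once a forced VLAN
 * configuration is removed and the VF's own settings become acceptable
 * again.
 */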
static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					struct qed_filter_ucast *p_params)
{
	int i;

	/* If we're in forced-mode, we don't allow any change */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
		return 0;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(p_vf->shadow_config.macs[i],
					     p_params->mac)) {
				memset(p_vf->shadow_config.macs[i], 0,
				       ETH_ALEN);
				break;
			}
		}

		if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "MAC isn't configured\n");
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
			memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
	}

	/* List the new MAC address */
	if (p_params->opcode != QED_FILTER_ADD &&
	    p_params->opcode != QED_FILTER_REPLACE)
		return 0;

	for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
		if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
			ether_addr_copy(p_vf->shadow_config.macs[i],
					p_params->mac);
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Added MAC at %d entry in shadow\n", i);
			break;
		}
	}

	if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
		return -EINVAL;
	}

	return 0;
}
static int
qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 struct qed_filter_ucast *p_params)
{
	int rc = 0;

	if (p_params->type == QED_FILTER_MAC) {
		rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
		if (rc)
			return rc;
	}

	if (p_params->type == QED_FILTER_VLAN)
		rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);

	return rc;
}
int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
		      int vfid, struct qed_filter_ucast *params)
{
	struct qed_public_vf_info *vf;

	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
	if (!vf)
		return -EINVAL;

	/* No real decision to make; Store the configured MAC */
	if (params->type == QED_FILTER_MAC ||
	    params->type == QED_FILTER_MAC_VLAN)
		ether_addr_copy(vf->mac, params->mac);

	return 0;
}
static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_ucast_filter_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_filter_ucast params;
	int rc;

	/* Prepare the unicast filter params */
	memset(&params, 0, sizeof(struct qed_filter_ucast));
	req = &mbx->req_virt->ucast_filter;
	params.opcode = (enum qed_filter_opcode)req->opcode;
	params.type = (enum qed_filter_ucast_type)req->type;

	params.is_rx_filter = 1;
	params.is_tx_filter = 1;
	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	memcpy(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
		   vf->abs_vf_id, params.opcode, params.type,
		   params.is_rx_filter ? "RX" : "",
		   params.is_tx_filter ? "TX" : "",
		   params.vport_to_add_to,
		   params.mac[0], params.mac[1],
		   params.mac[2], params.mac[3],
		   params.mac[4], params.mac[5], params.vlan);

	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Update shadow copy of the VF configuration */
	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Determine if the unicast filtering is acceptable by PF */
	if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_VLAN ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		/* Once VLAN is forced or PVID is set, do not allow
		 * to add/replace any further VLANs.
		 */
		if (params.opcode == QED_FILTER_ADD ||
		    params.opcode == QED_FILTER_REPLACE)
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_MAC ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
		    (params.opcode != QED_FILTER_ADD &&
		     params.opcode != QED_FILTER_REPLACE))
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
				     QED_SPQ_MODE_CB, NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	int i;

	/* Reset the SBs */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, false);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			     sizeof(struct pfvf_def_resp_tlv),
			     PFVF_STATUS_SUCCESS);
}
static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			     length, status);
}
static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	int rc = 0;

	qed_iov_vf_cleanup(p_hwfn, p_vf);

	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
		/* Stopping the VF */
		rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
				    p_vf->opaque_fid);

		if (rc) {
			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
			       rc);
			status = PFVF_STATUS_FAILURE;
		}

		p_vf->state = VF_STOPPED;
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			     length, status);
}
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int cnt;
	u32 val;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);

	for (cnt = 0; cnt < 50; cnt++) {
		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
		if (!val)
			break;
		msleep(20);
	}
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (cnt == 50) {
		DP_ERR(p_hwfn,
		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
		       p_vf->abs_vf_id, val);
		return -EBUSY;
	}

	return 0;
}
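
/* qed_fid_pretend() makes register accesses through this PTT appear to come
 * from the supplied function id, so the DORQ usage counter above is read
 * from the VF's own context; the PF's concrete fid is restored immediately
 * after the poll loop.
 */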
static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < MAX_NUM_VOQS; i++) {
		u32 prod;

		cons[i] = qed_rd(p_hwfn, p_ptt,
				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				 i * 0x40);
		prod = qed_rd(p_hwfn, p_ptt,
			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
			      i * 0x40);
		distance[i] = prod - cons[i];
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < MAX_NUM_VOQS; i++) {
			u32 tmp;

			tmp = qed_rd(p_hwfn, p_ptt,
				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				     i * 0x40);
			if (distance[i] > tmp - cons[i])
				break;
		}

		if (i == MAX_NUM_VOQS)
			break;

		msleep(20);
	}

	if (cnt == 50) {
		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
		       p_vf->abs_vf_id, i);
		return -EBUSY;
	}

	return 0;
}
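
/* The PBF poll above records, per VOQ, how far the producer was ahead of the
 * consumer when FLR handling started, then waits until each consumer has
 * advanced by at least that distance, i.e. until everything the VF could
 * have had queued in the transmit buffers has drained.
 */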
static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int rc;

	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	return 0;
}
static int
qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u16 rel_vf_id, u32 *ack_vfs)
{
	struct qed_vf_info *p_vf;
	int rc = 0;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!p_vf)
		return 0;

	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
	    (1ULL << (rel_vf_id % 64))) {
		u16 vfid = p_vf->abs_vf_id;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Handling FLR\n", vfid);

		qed_iov_vf_cleanup(p_hwfn, p_vf);

		/* If VF isn't active, no need for anything but SW */
		if (!p_vf->b_init)
			goto cleanup;

		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
		if (rc)
			goto cleanup;

		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
		if (rc) {
			DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
			return rc;
		}

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
		if (rc) {
			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
			       vfid);
			return rc;
		}
cleanup:
		/* Mark VF for ack and clean pending state */
		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;
		ack_vfs[vfid / 32] |= BIT((vfid % 32));
		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
	}

	return rc;
}
int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	int rc = 0;
	u16 i;

	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Since BRB <-> PRS interface can't be tested as part of the flr
	 * polling due to HW limitations, simply sleep a bit. And since
	 * there's no need to wait per-vf, do it before looping.
	 */
	msleep(100);

	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}
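
/* End-to-end FLR handling: qed_iov_mark_vf_flr() below latches MFW-reported
 * FLRs into pending_flr, the IOV workqueue then invokes this cleanup, which
 * per VF polls DORQ/PBF, runs the final-cleanup ramrod and re-enables VF
 * access, and the handled VFs are finally ACKed back to the MFW via
 * qed_mcp_ack_vf_flr().
 */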
int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	u16 i, found = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
		return 0;
	}

	/* Mark VFs */
	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_vf_info *p_vf;
		u8 vfid;

		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing the MFW. Since
			 * MFW will not trigger an additional attention for
			 * VF flr until ACKs, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = 1;
		}
	}

	return found;
}
static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *p_params,
			     struct qed_mcp_link_state *p_link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
	if (p_link)
		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
	if (p_caps)
		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, int vfid)
{
	struct qed_iov_vf_mbx *mbx;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* qed_iov_process_mbx_request */
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);

	mbx->first_tlv = mbx->req_virt->first_tlv;

	/* check if tlv type is known */
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_NOTICE(p_hwfn,
			  "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
			  p_vf->abs_vf_id,
			  mbx->first_tlv.tl.type,
			  mbx->first_tlv.tl.length,
			  mbx->first_tlv.padding, mbx->first_tlv.reply_address);

		/* Try replying in case reply address matches the acquisition's
		 * posted address.
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address)) {
			qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					     mbx->first_tlv.tl.type,
					     sizeof(struct pfvf_def_resp_tlv),
					     PFVF_STATUS_NOT_SUPPORTED);
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
				   p_vf->abs_vf_id);
		}
	}
}
void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u64 add_bit = 1ULL << (vfid % 64);

	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}
static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
						    u64 *events)
{
	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;

	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
}
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
			      u16 abs_vfid, struct regpair *vf_msg)
{
	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
	struct qed_vf_info *p_vf;

	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
			   abs_vfid);
		return 0;
	}
	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	/* Mark the event and schedule the workqueue */
	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

	return 0;
}
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return -EINVAL;
	}
}
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
			return i;

out:
	return MAX_NUM_VFS;
}
static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
			       int vfid)
{
	struct qed_dmae_params params;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return -EINVAL;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (qed_dmae_host2host(p_hwfn, ptt,
			       vf_info->vf_mbx.pending_req,
			       vf_info->vf_mbx.req_phys,
			       sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);
		return -EIO;
	}

	return 0;
}
static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
					    u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Cannot set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
				      u16 pvid, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Cannot set forced VLAN, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
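
/* Forced MAC/VLAN settings are published to the VF through its bulletin
 * board; valid_bitmap tells the VF which fields currently carry meaningful
 * data, and the VF driver is expected to pick the values up the next time
 * it samples its bulletin copy.
 */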
static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}
bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}
static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}
int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
	struct qed_vf_info *vf;
	int rc = -EINVAL;

	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf)
		goto out;

	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = 0;
		goto out;
	}

	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}
static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
					   u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}
u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, int vfid, int val)
{
	struct qed_vf_info *vf;
	u8 abs_vp_id = 0;
	int rc;

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return -EINVAL;

	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc)
		return rc;

	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
	struct qed_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return -EINVAL;
		}
	}

	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return qed_configure_vport_wfq(cdev, vport_id, rate);
}
static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_wfq_data *vf_vp_wfq;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}
/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}
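
/* The barrier pair around set_bit() orders the flag against surrounding
 * stores (e.g. a pending_req address written just before scheduling), so the
 * worker cannot observe the flag without also seeing the data it guards;
 * this is a cautious reading of the pattern rather than a documented
 * requirement.
 */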
void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
	    queue_delayed_work(cdev->hwfns[i].iov_wq,
			       &cdev->hwfns[i].iov_task, 0);
}
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	int i, j;

	for_each_hwfn(cdev, i)
	    if (cdev->hwfns[i].iov_wq)
		flush_workqueue(cdev->hwfns[i].iov_wq);

	/* Mark VFs for disablement */
	qed_iov_set_vfs_to_disable(cdev, true);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		/* Failure to acquire the ptt in 100g creates an odd error
		 * where the first engine has already released IOV.
		 */
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			return -EBUSY;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		qed_clean_wfq_db(hwfn, ptt);

		qed_for_each_vf(hwfn, j) {
			int k;

			if (!qed_iov_is_valid_vfid(hwfn, j, true))
				continue;

			/* Wait until VF is disabled before releasing */
			for (k = 0; k < 100; k++) {
				if (!qed_iov_is_vf_stopped(hwfn, j))
					msleep(20);
				else
					break;
			}

			if (k < 100)
				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
							  ptt, j);
			else
				DP_ERR(hwfn,
				       "Timeout waiting for VF's FLR to end\n");
		}

		qed_ptt_release(hwfn, ptt);
	}

	qed_iov_set_vfs_to_disable(cdev, false);

	return 0;
}
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int i, j, rc;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct qed_hwfn *hwfn = &cdev->hwfns[j];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
		int num_sbs = 0, limit = 16;

		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		if (IS_MF_DEFAULT(hwfn))
			limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;

		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(hwfn, &sb_cnt_info);
		num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false))
				continue;

			rc = qed_iov_init_hw_for_vf(hwfn,
						    ptt, i, num_sbs / num);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	return num;

err:
	qed_sriov_disable(cdev, false);
	return rc;
}
static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}
static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced MAC, and schedule the IOV task */
		ether_addr_copy(vf_info->forced_mac, mac);
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}
static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF VLAN; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}
static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);

	return 0;
}
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}
static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}
static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}
static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);

		vf->tx_rate = rate;

		qed_inform_vf_link_state(p_hwfn);
	}

	return 0;
}
static int qed_set_vf_rate(struct qed_dev *cdev,
			   int vfid, u32 min_rate, u32 max_rate)
{
	int rc_min = 0, rc_max = 0;

	if (max_rate)
		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);

	if (min_rate)
		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);

	if (rc_max | rc_min)
		return -EINVAL;

	return 0;
}
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_and_clear_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}
static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
	int i;

	qed_for_each_vf(hwfn, i) {
		struct qed_public_vf_info *info;
		bool update = false;
		u8 *mac;

		info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (!info)
			continue;

		/* Update data on bulletin board */
		mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac))) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

			/* Update bulletin board with forced MAC */
			qed_iov_bulletin_set_forced_mac(hwfn,
							info->forced_mac, i);
			update = true;
		}

		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
		    info->forced_vlan) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
				   info->forced_vlan,
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
			qed_iov_bulletin_set_forced_vlan(hwfn,
							 info->forced_vlan, i);
			update = true;
		}

		if (update)
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}
static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		return;
	}

	qed_for_each_vf(hwfn, i)
	    qed_iov_post_vf_bulletin(hwfn, i, ptt);

	qed_ptt_release(hwfn, ptt);
}
void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
			goto out;
		}

		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
		if (rc)
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

		qed_ptt_release(hwfn, ptt);
	}

out:
	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);
}
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		flush_workqueue(cdev->hwfns[i].iov_wq);
		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}
int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs need a dedicated workqueue only if they support IOV.
		 * VFs always require one.
		 */
		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		if (IS_PF(cdev))
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
		else
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
	}

	return 0;
}
const struct qed_iov_hv_ops qed_iov_ops_pass = {
	.configure = &qed_sriov_configure,
	.set_mac = &qed_sriov_pf_set_mac,
	.set_vlan = &qed_sriov_pf_set_vlan,
	.get_config = &qed_get_vf_config,
	.set_link_state = &qed_set_vf_link_state,
	.set_spoof = &qed_spoof_configure,
	.set_rate = &qed_set_vf_rate,
};
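
/* This ops table is the PF-side SR-IOV interface exported through
 * qed_iov_if.h; the Ethernet driver layered on top of qed presumably wires
 * these callbacks into the standard ndo_set_vf_* operations (set MAC/VLAN,
 * link state, spoof-check, rates) behind the "ip link set ... vf" commands.
 */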