1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2014 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
29 /***********************misc routines*****************************/
32 * i40e_vc_isvalid_vsi_id
33 * @vf: pointer to the vf info
34 * @vsi_id: vf relative vsi id
36 * check for the valid vsi id
38 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf
*vf
, u8 vsi_id
)
40 struct i40e_pf
*pf
= vf
->pf
;
42 return pf
->vsi
[vsi_id
]->vf_id
== vf
->vf_id
;
46 * i40e_vc_isvalid_queue_id
47 * @vf: pointer to the vf info
49 * @qid: vsi relative queue id
51 * check for the valid queue id
53 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf
*vf
, u8 vsi_id
,
56 struct i40e_pf
*pf
= vf
->pf
;
58 return qid
< pf
->vsi
[vsi_id
]->num_queue_pairs
;
62 * i40e_vc_isvalid_vector_id
63 * @vf: pointer to the vf info
64 * @vector_id: vf relative vector id
66 * check for the valid vector id
68 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf
*vf
, u8 vector_id
)
70 struct i40e_pf
*pf
= vf
->pf
;
72 return vector_id
<= pf
->hw
.func_caps
.num_msix_vectors_vf
;
75 /***********************vf resource mgmt routines*****************/
78 * i40e_vc_get_pf_queue_id
79 * @vf: pointer to the vf info
80 * @vsi_idx: index of VSI in PF struct
81 * @vsi_queue_id: vsi relative queue id
83 * return pf relative queue id
85 static u16
i40e_vc_get_pf_queue_id(struct i40e_vf
*vf
, u8 vsi_idx
,
88 struct i40e_pf
*pf
= vf
->pf
;
89 struct i40e_vsi
*vsi
= pf
->vsi
[vsi_idx
];
90 u16 pf_queue_id
= I40E_QUEUE_END_OF_LIST
;
92 if (le16_to_cpu(vsi
->info
.mapping_flags
) &
93 I40E_AQ_VSI_QUE_MAP_NONCONTIG
)
95 le16_to_cpu(vsi
->info
.queue_mapping
[vsi_queue_id
]);
97 pf_queue_id
= le16_to_cpu(vsi
->info
.queue_mapping
[0]) +
104 * i40e_config_irq_link_list
105 * @vf: pointer to the vf info
106 * @vsi_idx: index of VSI in PF struct
107 * @vecmap: irq map info
109 * configure irq link list from the map
111 static void i40e_config_irq_link_list(struct i40e_vf
*vf
, u16 vsi_idx
,
112 struct i40e_virtchnl_vector_map
*vecmap
)
114 unsigned long linklistmap
= 0, tempmap
;
115 struct i40e_pf
*pf
= vf
->pf
;
116 struct i40e_hw
*hw
= &pf
->hw
;
117 u16 vsi_queue_id
, pf_queue_id
;
118 enum i40e_queue_type qtype
;
119 u16 next_q
, vector_id
;
123 vector_id
= vecmap
->vector_id
;
126 reg_idx
= I40E_VPINT_LNKLST0(vf
->vf_id
);
128 reg_idx
= I40E_VPINT_LNKLSTN(
129 (pf
->hw
.func_caps
.num_msix_vectors_vf
130 * vf
->vf_id
) + (vector_id
- 1));
132 if (vecmap
->rxq_map
== 0 && vecmap
->txq_map
== 0) {
133 /* Special case - No queues mapped on this vector */
134 wr32(hw
, reg_idx
, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK
);
137 tempmap
= vecmap
->rxq_map
;
138 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
140 (I40E_VIRTCHNL_SUPPORTED_QTYPES
*
144 tempmap
= vecmap
->txq_map
;
145 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
147 (I40E_VIRTCHNL_SUPPORTED_QTYPES
* vsi_queue_id
151 next_q
= find_first_bit(&linklistmap
,
153 I40E_VIRTCHNL_SUPPORTED_QTYPES
));
154 vsi_queue_id
= next_q
/I40E_VIRTCHNL_SUPPORTED_QTYPES
;
155 qtype
= next_q
%I40E_VIRTCHNL_SUPPORTED_QTYPES
;
156 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
157 reg
= ((qtype
<< I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT
) | pf_queue_id
);
159 wr32(hw
, reg_idx
, reg
);
161 while (next_q
< (I40E_MAX_VSI_QP
* I40E_VIRTCHNL_SUPPORTED_QTYPES
)) {
163 case I40E_QUEUE_TYPE_RX
:
164 reg_idx
= I40E_QINT_RQCTL(pf_queue_id
);
165 itr_idx
= vecmap
->rxitr_idx
;
167 case I40E_QUEUE_TYPE_TX
:
168 reg_idx
= I40E_QINT_TQCTL(pf_queue_id
);
169 itr_idx
= vecmap
->txitr_idx
;
175 next_q
= find_next_bit(&linklistmap
,
177 I40E_VIRTCHNL_SUPPORTED_QTYPES
),
179 if (next_q
< (I40E_MAX_VSI_QP
* I40E_VIRTCHNL_SUPPORTED_QTYPES
)) {
180 vsi_queue_id
= next_q
/ I40E_VIRTCHNL_SUPPORTED_QTYPES
;
181 qtype
= next_q
% I40E_VIRTCHNL_SUPPORTED_QTYPES
;
182 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
,
185 pf_queue_id
= I40E_QUEUE_END_OF_LIST
;
189 /* format for the RQCTL & TQCTL regs is same */
191 (qtype
<< I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT
) |
192 (pf_queue_id
<< I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT
) |
193 (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT
) |
194 (itr_idx
<< I40E_QINT_RQCTL_ITR_INDX_SHIFT
);
195 wr32(hw
, reg_idx
, reg
);
203 * i40e_config_vsi_tx_queue
204 * @vf: pointer to the vf info
205 * @vsi_idx: index of VSI in PF struct
206 * @vsi_queue_id: vsi relative queue index
207 * @info: config. info
211 static int i40e_config_vsi_tx_queue(struct i40e_vf
*vf
, u16 vsi_idx
,
213 struct i40e_virtchnl_txq_info
*info
)
215 struct i40e_pf
*pf
= vf
->pf
;
216 struct i40e_hw
*hw
= &pf
->hw
;
217 struct i40e_hmc_obj_txq tx_ctx
;
222 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
224 /* clear the context structure first */
225 memset(&tx_ctx
, 0, sizeof(struct i40e_hmc_obj_txq
));
227 /* only set the required fields */
228 tx_ctx
.base
= info
->dma_ring_addr
/ 128;
229 tx_ctx
.qlen
= info
->ring_len
;
230 tx_ctx
.rdylist
= le16_to_cpu(pf
->vsi
[vsi_idx
]->info
.qs_handle
[0]);
231 tx_ctx
.rdylist_act
= 0;
233 /* clear the context in the HMC */
234 ret
= i40e_clear_lan_tx_queue_context(hw
, pf_queue_id
);
236 dev_err(&pf
->pdev
->dev
,
237 "Failed to clear VF LAN Tx queue context %d, error: %d\n",
243 /* set the context in the HMC */
244 ret
= i40e_set_lan_tx_queue_context(hw
, pf_queue_id
, &tx_ctx
);
246 dev_err(&pf
->pdev
->dev
,
247 "Failed to set VF LAN Tx queue context %d error: %d\n",
253 /* associate this queue with the PCI VF function */
254 qtx_ctl
= I40E_QTX_CTL_VF_QUEUE
;
255 qtx_ctl
|= ((hw
->pf_id
<< I40E_QTX_CTL_PF_INDX_SHIFT
)
256 & I40E_QTX_CTL_PF_INDX_MASK
);
257 qtx_ctl
|= (((vf
->vf_id
+ hw
->func_caps
.vf_base_id
)
258 << I40E_QTX_CTL_VFVM_INDX_SHIFT
)
259 & I40E_QTX_CTL_VFVM_INDX_MASK
);
260 wr32(hw
, I40E_QTX_CTL(pf_queue_id
), qtx_ctl
);
268 * i40e_config_vsi_rx_queue
269 * @vf: pointer to the vf info
270 * @vsi_idx: index of VSI in PF struct
271 * @vsi_queue_id: vsi relative queue index
272 * @info: config. info
276 static int i40e_config_vsi_rx_queue(struct i40e_vf
*vf
, u16 vsi_idx
,
278 struct i40e_virtchnl_rxq_info
*info
)
280 struct i40e_pf
*pf
= vf
->pf
;
281 struct i40e_hw
*hw
= &pf
->hw
;
282 struct i40e_hmc_obj_rxq rx_ctx
;
286 pf_queue_id
= i40e_vc_get_pf_queue_id(vf
, vsi_idx
, vsi_queue_id
);
288 /* clear the context structure first */
289 memset(&rx_ctx
, 0, sizeof(struct i40e_hmc_obj_rxq
));
291 /* only set the required fields */
292 rx_ctx
.base
= info
->dma_ring_addr
/ 128;
293 rx_ctx
.qlen
= info
->ring_len
;
295 if (info
->splithdr_enabled
) {
296 rx_ctx
.hsplit_0
= I40E_RX_SPLIT_L2
|
298 I40E_RX_SPLIT_TCP_UDP
|
300 /* header length validation */
301 if (info
->hdr_size
> ((2 * 1024) - 64)) {
305 rx_ctx
.hbuff
= info
->hdr_size
>> I40E_RXQ_CTX_HBUFF_SHIFT
;
307 /* set splitalways mode 10b */
311 /* databuffer length validation */
312 if (info
->databuffer_size
> ((16 * 1024) - 128)) {
316 rx_ctx
.dbuff
= info
->databuffer_size
>> I40E_RXQ_CTX_DBUFF_SHIFT
;
318 /* max pkt. length validation */
319 if (info
->max_pkt_size
>= (16 * 1024) || info
->max_pkt_size
< 64) {
323 rx_ctx
.rxmax
= info
->max_pkt_size
;
325 /* enable 32bytes desc always */
329 rx_ctx
.tphrdesc_ena
= 1;
330 rx_ctx
.tphwdesc_ena
= 1;
331 rx_ctx
.tphdata_ena
= 1;
332 rx_ctx
.tphhead_ena
= 1;
333 rx_ctx
.lrxqthresh
= 2;
336 /* clear the context in the HMC */
337 ret
= i40e_clear_lan_rx_queue_context(hw
, pf_queue_id
);
339 dev_err(&pf
->pdev
->dev
,
340 "Failed to clear VF LAN Rx queue context %d, error: %d\n",
346 /* set the context in the HMC */
347 ret
= i40e_set_lan_rx_queue_context(hw
, pf_queue_id
, &rx_ctx
);
349 dev_err(&pf
->pdev
->dev
,
350 "Failed to set VF LAN Rx queue context %d error: %d\n",
362 * @vf: pointer to the vf info
363 * @type: type of VSI to allocate
365 * alloc vf vsi context & resources
367 static int i40e_alloc_vsi_res(struct i40e_vf
*vf
, enum i40e_vsi_type type
)
369 struct i40e_mac_filter
*f
= NULL
;
370 struct i40e_pf
*pf
= vf
->pf
;
371 struct i40e_vsi
*vsi
;
374 vsi
= i40e_vsi_setup(pf
, type
, pf
->vsi
[pf
->lan_vsi
]->seid
, vf
->vf_id
);
377 dev_err(&pf
->pdev
->dev
,
378 "add vsi failed for vf %d, aq_err %d\n",
379 vf
->vf_id
, pf
->hw
.aq
.asq_last_status
);
381 goto error_alloc_vsi_res
;
383 if (type
== I40E_VSI_SRIOV
) {
384 u8 brdcast
[ETH_ALEN
] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
385 vf
->lan_vsi_index
= vsi
->idx
;
386 vf
->lan_vsi_id
= vsi
->id
;
387 dev_info(&pf
->pdev
->dev
,
388 "LAN VSI index %d, VSI id %d\n",
390 /* If the port VLAN has been configured and then the
391 * VF driver was removed then the VSI port VLAN
392 * configuration was destroyed. Check if there is
393 * a port VLAN and restore the VSI configuration if
396 if (vf
->port_vlan_id
)
397 i40e_vsi_add_pvid(vsi
, vf
->port_vlan_id
);
398 f
= i40e_add_filter(vsi
, vf
->default_lan_addr
.addr
,
399 vf
->port_vlan_id
, true, false);
401 dev_info(&pf
->pdev
->dev
,
402 "Could not allocate VF MAC addr\n");
403 f
= i40e_add_filter(vsi
, brdcast
, vf
->port_vlan_id
,
406 dev_info(&pf
->pdev
->dev
,
407 "Could not allocate VF broadcast filter\n");
411 dev_err(&pf
->pdev
->dev
, "Unable to add ucast filter\n");
413 goto error_alloc_vsi_res
;
416 /* program mac filter */
417 ret
= i40e_sync_vsi_filters(vsi
);
419 dev_err(&pf
->pdev
->dev
, "Unable to program ucast filters\n");
420 goto error_alloc_vsi_res
;
428 * i40e_enable_vf_mappings
429 * @vf: pointer to the vf info
433 static void i40e_enable_vf_mappings(struct i40e_vf
*vf
)
435 struct i40e_pf
*pf
= vf
->pf
;
436 struct i40e_hw
*hw
= &pf
->hw
;
437 u32 reg
, total_queue_pairs
= 0;
440 /* Tell the hardware we're using noncontiguous mapping. HW requires
441 * that VF queues be mapped using this method, even when they are
442 * contiguous in real life
444 wr32(hw
, I40E_VSILAN_QBASE(vf
->lan_vsi_id
),
445 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK
);
447 /* enable VF vplan_qtable mappings */
448 reg
= I40E_VPLAN_MAPENA_TXRX_ENA_MASK
;
449 wr32(hw
, I40E_VPLAN_MAPENA(vf
->vf_id
), reg
);
451 /* map PF queues to VF queues */
452 for (j
= 0; j
< pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
; j
++) {
453 u16 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
, j
);
454 reg
= (qid
& I40E_VPLAN_QTABLE_QINDEX_MASK
);
455 wr32(hw
, I40E_VPLAN_QTABLE(total_queue_pairs
, vf
->vf_id
), reg
);
459 /* map PF queues to VSI */
460 for (j
= 0; j
< 7; j
++) {
461 if (j
* 2 >= pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
) {
462 reg
= 0x07FF07FF; /* unused */
464 u16 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
,
467 qid
= i40e_vc_get_pf_queue_id(vf
, vf
->lan_vsi_index
,
471 wr32(hw
, I40E_VSILAN_QTABLE(j
, vf
->lan_vsi_id
), reg
);
478 * i40e_disable_vf_mappings
479 * @vf: pointer to the vf info
481 * disable vf mappings
483 static void i40e_disable_vf_mappings(struct i40e_vf
*vf
)
485 struct i40e_pf
*pf
= vf
->pf
;
486 struct i40e_hw
*hw
= &pf
->hw
;
489 /* disable qp mappings */
490 wr32(hw
, I40E_VPLAN_MAPENA(vf
->vf_id
), 0);
491 for (i
= 0; i
< I40E_MAX_VSI_QP
; i
++)
492 wr32(hw
, I40E_VPLAN_QTABLE(i
, vf
->vf_id
),
493 I40E_QUEUE_END_OF_LIST
);
499 * @vf: pointer to the vf info
503 static void i40e_free_vf_res(struct i40e_vf
*vf
)
505 struct i40e_pf
*pf
= vf
->pf
;
506 struct i40e_hw
*hw
= &pf
->hw
;
510 /* free vsi & disconnect it from the parent uplink */
511 if (vf
->lan_vsi_index
) {
512 i40e_vsi_release(pf
->vsi
[vf
->lan_vsi_index
]);
513 vf
->lan_vsi_index
= 0;
516 msix_vf
= pf
->hw
.func_caps
.num_msix_vectors_vf
+ 1;
517 /* disable interrupts so the VF starts in a known state */
518 for (i
= 0; i
< msix_vf
; i
++) {
519 /* format is same for both registers */
521 reg_idx
= I40E_VFINT_DYN_CTL0(vf
->vf_id
);
523 reg_idx
= I40E_VFINT_DYN_CTLN(((msix_vf
- 1) *
526 wr32(hw
, reg_idx
, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK
);
530 /* clear the irq settings */
531 for (i
= 0; i
< msix_vf
; i
++) {
532 /* format is same for both registers */
534 reg_idx
= I40E_VPINT_LNKLST0(vf
->vf_id
);
536 reg_idx
= I40E_VPINT_LNKLSTN(((msix_vf
- 1) *
539 reg
= (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK
|
540 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK
);
541 wr32(hw
, reg_idx
, reg
);
544 /* reset some of the state varibles keeping
545 * track of the resources
547 vf
->num_queue_pairs
= 0;
553 * @vf: pointer to the vf info
555 * allocate vf resources
557 static int i40e_alloc_vf_res(struct i40e_vf
*vf
)
559 struct i40e_pf
*pf
= vf
->pf
;
560 int total_queue_pairs
= 0;
563 /* allocate hw vsi context & associated resources */
564 ret
= i40e_alloc_vsi_res(vf
, I40E_VSI_SRIOV
);
567 total_queue_pairs
+= pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
;
568 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
);
570 /* store the total qps number for the runtime
573 vf
->num_queue_pairs
= total_queue_pairs
;
575 /* vf is now completely initialized */
576 set_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
);
580 i40e_free_vf_res(vf
);
585 #define VF_DEVICE_STATUS 0xAA
586 #define VF_TRANS_PENDING_MASK 0x20
588 * i40e_quiesce_vf_pci
589 * @vf: pointer to the vf structure
591 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
592 * if the transactions never clear.
594 static int i40e_quiesce_vf_pci(struct i40e_vf
*vf
)
596 struct i40e_pf
*pf
= vf
->pf
;
597 struct i40e_hw
*hw
= &pf
->hw
;
601 vf_abs_id
= vf
->vf_id
+ hw
->func_caps
.vf_base_id
;
603 wr32(hw
, I40E_PF_PCI_CIAA
,
604 VF_DEVICE_STATUS
| (vf_abs_id
<< I40E_PF_PCI_CIAA_VF_NUM_SHIFT
));
605 for (i
= 0; i
< 100; i
++) {
606 reg
= rd32(hw
, I40E_PF_PCI_CIAD
);
607 if ((reg
& VF_TRANS_PENDING_MASK
) == 0)
616 * @vf: pointer to the vf structure
617 * @flr: VFLR was issued or not
621 void i40e_reset_vf(struct i40e_vf
*vf
, bool flr
)
623 struct i40e_pf
*pf
= vf
->pf
;
624 struct i40e_hw
*hw
= &pf
->hw
;
630 clear_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
);
632 /* In the case of a VFLR, the HW has already reset the VF and we
633 * just need to clean up, so don't hit the VFRTRIG register.
636 /* reset vf using VPGEN_VFRTRIG reg */
637 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
638 reg
|= I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
639 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
643 if (i40e_quiesce_vf_pci(vf
))
644 dev_err(&pf
->pdev
->dev
, "VF %d PCI transactions stuck\n",
647 /* poll VPGEN_VFRSTAT reg to make sure
648 * that reset is complete
650 for (i
= 0; i
< 100; i
++) {
651 /* vf reset requires driver to first reset the
652 * vf & than poll the status register to make sure
653 * that the requested op was completed
657 reg
= rd32(hw
, I40E_VPGEN_VFRSTAT(vf
->vf_id
));
658 if (reg
& I40E_VPGEN_VFRSTAT_VFRD_MASK
) {
665 dev_err(&pf
->pdev
->dev
, "VF reset check timeout on VF %d\n",
667 wr32(hw
, I40E_VFGEN_RSTAT1(vf
->vf_id
), I40E_VFR_COMPLETED
);
668 /* clear the reset bit in the VPGEN_VFRTRIG reg */
669 reg
= rd32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
));
670 reg
&= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK
;
671 wr32(hw
, I40E_VPGEN_VFRTRIG(vf
->vf_id
), reg
);
673 /* On initial reset, we won't have any queues */
674 if (vf
->lan_vsi_index
== 0)
677 i40e_vsi_control_rings(pf
->vsi
[vf
->lan_vsi_index
], false);
679 /* reallocate vf resources to reset the VSI state */
680 i40e_free_vf_res(vf
);
682 i40e_alloc_vf_res(vf
);
683 i40e_enable_vf_mappings(vf
);
685 /* tell the VF the reset is done */
686 wr32(hw
, I40E_VFGEN_RSTAT1(vf
->vf_id
), I40E_VFR_VFACTIVE
);
691 * i40e_vfs_are_assigned
692 * @pf: pointer to the pf structure
694 * Determine if any VFs are assigned to VMs
696 static bool i40e_vfs_are_assigned(struct i40e_pf
*pf
)
698 struct pci_dev
*pdev
= pf
->pdev
;
699 struct pci_dev
*vfdev
;
701 /* loop through all the VFs to see if we own any that are assigned */
702 vfdev
= pci_get_device(PCI_VENDOR_ID_INTEL
, I40E_VF_DEVICE_ID
, NULL
);
704 /* if we don't own it we don't care */
705 if (vfdev
->is_virtfn
&& pci_physfn(vfdev
) == pdev
) {
706 /* if it is assigned we cannot release it */
707 if (vfdev
->dev_flags
& PCI_DEV_FLAGS_ASSIGNED
)
711 vfdev
= pci_get_device(PCI_VENDOR_ID_INTEL
,
718 #ifdef CONFIG_PCI_IOV
721 * i40e_enable_pf_switch_lb
722 * @pf: pointer to the pf structure
724 * enable switch loop back or die - no point in a return value
726 static void i40e_enable_pf_switch_lb(struct i40e_pf
*pf
)
728 struct i40e_vsi
*vsi
= pf
->vsi
[pf
->lan_vsi
];
729 struct i40e_vsi_context ctxt
;
732 ctxt
.seid
= pf
->main_vsi_seid
;
733 ctxt
.pf_num
= pf
->hw
.pf_id
;
735 aq_ret
= i40e_aq_get_vsi_params(&pf
->hw
, &ctxt
, NULL
);
737 dev_info(&pf
->pdev
->dev
,
738 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
739 __func__
, aq_ret
, pf
->hw
.aq
.asq_last_status
);
742 ctxt
.flags
= I40E_AQ_VSI_TYPE_PF
;
743 ctxt
.info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
744 ctxt
.info
.switch_id
|= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
);
746 aq_ret
= i40e_aq_update_vsi_params(&vsi
->back
->hw
, &ctxt
, NULL
);
748 dev_info(&pf
->pdev
->dev
,
749 "%s: update vsi switch failed, aq_err=%d\n",
750 __func__
, vsi
->back
->hw
.aq
.asq_last_status
);
756 * i40e_disable_pf_switch_lb
757 * @pf: pointer to the pf structure
759 * disable switch loop back or die - no point in a return value
761 static void i40e_disable_pf_switch_lb(struct i40e_pf
*pf
)
763 struct i40e_vsi
*vsi
= pf
->vsi
[pf
->lan_vsi
];
764 struct i40e_vsi_context ctxt
;
767 ctxt
.seid
= pf
->main_vsi_seid
;
768 ctxt
.pf_num
= pf
->hw
.pf_id
;
770 aq_ret
= i40e_aq_get_vsi_params(&pf
->hw
, &ctxt
, NULL
);
772 dev_info(&pf
->pdev
->dev
,
773 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
774 __func__
, aq_ret
, pf
->hw
.aq
.asq_last_status
);
777 ctxt
.flags
= I40E_AQ_VSI_TYPE_PF
;
778 ctxt
.info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
779 ctxt
.info
.switch_id
&= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
);
781 aq_ret
= i40e_aq_update_vsi_params(&vsi
->back
->hw
, &ctxt
, NULL
);
783 dev_info(&pf
->pdev
->dev
,
784 "%s: update vsi switch failed, aq_err=%d\n",
785 __func__
, vsi
->back
->hw
.aq
.asq_last_status
);
791 * @pf: pointer to the pf structure
795 void i40e_free_vfs(struct i40e_pf
*pf
)
797 struct i40e_hw
*hw
= &pf
->hw
;
798 u32 reg_idx
, bit_idx
;
804 /* Disable interrupt 0 so we don't try to handle the VFLR. */
805 i40e_irq_dynamic_disable_icr0(pf
);
807 mdelay(10); /* let any messages in transit get finished up */
808 /* free up vf resources */
809 tmp
= pf
->num_alloc_vfs
;
810 pf
->num_alloc_vfs
= 0;
811 for (i
= 0; i
< tmp
; i
++) {
812 if (test_bit(I40E_VF_STAT_INIT
, &pf
->vf
[i
].vf_states
))
813 i40e_free_vf_res(&pf
->vf
[i
]);
814 /* disable qp mappings */
815 i40e_disable_vf_mappings(&pf
->vf
[i
]);
821 if (!i40e_vfs_are_assigned(pf
)) {
822 pci_disable_sriov(pf
->pdev
);
823 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
824 * work correctly when SR-IOV gets re-enabled.
826 for (vf_id
= 0; vf_id
< tmp
; vf_id
++) {
827 reg_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) / 32;
828 bit_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) % 32;
829 wr32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
), (1 << bit_idx
));
831 i40e_disable_pf_switch_lb(pf
);
833 dev_warn(&pf
->pdev
->dev
,
834 "unable to disable SR-IOV because VFs are assigned.\n");
837 /* Re-enable interrupt 0. */
838 i40e_irq_dynamic_enable_icr0(pf
);
841 #ifdef CONFIG_PCI_IOV
844 * @pf: pointer to the pf structure
845 * @num_alloc_vfs: number of vfs to allocate
847 * allocate vf resources
849 static int i40e_alloc_vfs(struct i40e_pf
*pf
, u16 num_alloc_vfs
)
854 /* Disable interrupt 0 so we don't try to handle the VFLR. */
855 i40e_irq_dynamic_disable_icr0(pf
);
857 ret
= pci_enable_sriov(pf
->pdev
, num_alloc_vfs
);
859 dev_err(&pf
->pdev
->dev
,
860 "pci_enable_sriov failed with error %d!\n", ret
);
861 pf
->num_alloc_vfs
= 0;
865 /* allocate memory */
866 vfs
= kzalloc(num_alloc_vfs
* sizeof(struct i40e_vf
), GFP_KERNEL
);
872 /* apply default profile */
873 for (i
= 0; i
< num_alloc_vfs
; i
++) {
875 vfs
[i
].parent_type
= I40E_SWITCH_ELEMENT_TYPE_VEB
;
878 /* assign default capabilities */
879 set_bit(I40E_VIRTCHNL_VF_CAP_L2
, &vfs
[i
].vf_caps
);
880 /* vf resources get allocated during reset */
881 i40e_reset_vf(&vfs
[i
], false);
883 /* enable vf vplan_qtable mappings */
884 i40e_enable_vf_mappings(&vfs
[i
]);
887 pf
->num_alloc_vfs
= num_alloc_vfs
;
889 i40e_enable_pf_switch_lb(pf
);
894 /* Re-enable interrupt 0. */
895 i40e_irq_dynamic_enable_icr0(pf
);
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Returns the number of VFs enabled
 * on success, a negative error code otherwise.
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs; num_vfs == 0 disables SR-IOV.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs)
		return i40e_pci_sriov_enable(pdev, num_vfs);

	i40e_free_vfs(pf);
	return 0;
}
959 /***********************virtual channel routines******************/
962 * i40e_vc_send_msg_to_vf
963 * @vf: pointer to the vf info
964 * @v_opcode: virtual channel opcode
965 * @v_retval: virtual channel return value
966 * @msg: pointer to the msg buffer
967 * @msglen: msg length
971 static int i40e_vc_send_msg_to_vf(struct i40e_vf
*vf
, u32 v_opcode
,
972 u32 v_retval
, u8
*msg
, u16 msglen
)
974 struct i40e_pf
*pf
= vf
->pf
;
975 struct i40e_hw
*hw
= &pf
->hw
;
976 int true_vf_id
= vf
->vf_id
+ hw
->func_caps
.vf_base_id
;
979 /* single place to detect unsuccessful return values */
981 vf
->num_invalid_msgs
++;
982 dev_err(&pf
->pdev
->dev
, "Failed opcode %d Error: %d\n",
984 if (vf
->num_invalid_msgs
>
985 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED
) {
986 dev_err(&pf
->pdev
->dev
,
987 "Number of invalid messages exceeded for VF %d\n",
989 dev_err(&pf
->pdev
->dev
, "Use PF Control I/F to enable the VF\n");
990 set_bit(I40E_VF_STAT_DISABLED
, &vf
->vf_states
);
993 vf
->num_valid_msgs
++;
996 aq_ret
= i40e_aq_send_msg_to_vf(hw
, true_vf_id
, v_opcode
, v_retval
,
999 dev_err(&pf
->pdev
->dev
,
1000 "Unable to send the message to VF %d aq_err %d\n",
1001 vf
->vf_id
, pf
->hw
.aq
.asq_last_status
);
1009 * i40e_vc_send_resp_to_vf
1010 * @vf: pointer to the vf info
1011 * @opcode: operation code
1012 * @retval: return value
1014 * send resp msg to vf
1016 static int i40e_vc_send_resp_to_vf(struct i40e_vf
*vf
,
1017 enum i40e_virtchnl_ops opcode
,
1020 return i40e_vc_send_msg_to_vf(vf
, opcode
, retval
, NULL
, 0);
1024 * i40e_vc_get_version_msg
1025 * @vf: pointer to the vf info
1027 * called from the vf to request the API version used by the PF
1029 static int i40e_vc_get_version_msg(struct i40e_vf
*vf
)
1031 struct i40e_virtchnl_version_info info
= {
1032 I40E_VIRTCHNL_VERSION_MAJOR
, I40E_VIRTCHNL_VERSION_MINOR
1035 return i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_VERSION
,
1036 I40E_SUCCESS
, (u8
*)&info
,
1038 i40e_virtchnl_version_info
));
1042 * i40e_vc_get_vf_resources_msg
1043 * @vf: pointer to the vf info
1044 * @msg: pointer to the msg buffer
1045 * @msglen: msg length
1047 * called from the vf to request its resources
1049 static int i40e_vc_get_vf_resources_msg(struct i40e_vf
*vf
)
1051 struct i40e_virtchnl_vf_resource
*vfres
= NULL
;
1052 struct i40e_pf
*pf
= vf
->pf
;
1053 i40e_status aq_ret
= 0;
1054 struct i40e_vsi
*vsi
;
1059 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
1060 aq_ret
= I40E_ERR_PARAM
;
1064 len
= (sizeof(struct i40e_virtchnl_vf_resource
) +
1065 sizeof(struct i40e_virtchnl_vsi_resource
) * num_vsis
);
1067 vfres
= kzalloc(len
, GFP_KERNEL
);
1069 aq_ret
= I40E_ERR_NO_MEMORY
;
1074 vfres
->vf_offload_flags
= I40E_VIRTCHNL_VF_OFFLOAD_L2
;
1075 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
1076 if (!vsi
->info
.pvid
)
1077 vfres
->vf_offload_flags
|= I40E_VIRTCHNL_VF_OFFLOAD_VLAN
;
1079 vfres
->num_vsis
= num_vsis
;
1080 vfres
->num_queue_pairs
= vf
->num_queue_pairs
;
1081 vfres
->max_vectors
= pf
->hw
.func_caps
.num_msix_vectors_vf
;
1082 if (vf
->lan_vsi_index
) {
1083 vfres
->vsi_res
[i
].vsi_id
= vf
->lan_vsi_index
;
1084 vfres
->vsi_res
[i
].vsi_type
= I40E_VSI_SRIOV
;
1085 vfres
->vsi_res
[i
].num_queue_pairs
=
1086 pf
->vsi
[vf
->lan_vsi_index
]->num_queue_pairs
;
1087 memcpy(vfres
->vsi_res
[i
].default_mac_addr
,
1088 vf
->default_lan_addr
.addr
, ETH_ALEN
);
1091 set_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
);
1094 /* send the response back to the vf */
1095 ret
= i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_GET_VF_RESOURCES
,
1096 aq_ret
, (u8
*)vfres
, len
);
1103 * i40e_vc_reset_vf_msg
1104 * @vf: pointer to the vf info
1105 * @msg: pointer to the msg buffer
1106 * @msglen: msg length
1108 * called from the vf to reset itself,
1109 * unlike other virtchnl messages, pf driver
1110 * doesn't send the response back to the vf
1112 static void i40e_vc_reset_vf_msg(struct i40e_vf
*vf
)
1114 if (test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
))
1115 i40e_reset_vf(vf
, false);
1119 * i40e_vc_config_promiscuous_mode_msg
1120 * @vf: pointer to the vf info
1121 * @msg: pointer to the msg buffer
1122 * @msglen: msg length
1124 * called from the vf to configure the promiscuous mode of
1127 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf
*vf
,
1128 u8
*msg
, u16 msglen
)
1130 struct i40e_virtchnl_promisc_info
*info
=
1131 (struct i40e_virtchnl_promisc_info
*)msg
;
1132 struct i40e_pf
*pf
= vf
->pf
;
1133 struct i40e_hw
*hw
= &pf
->hw
;
1134 bool allmulti
= false;
1135 bool promisc
= false;
1138 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1139 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1140 !i40e_vc_isvalid_vsi_id(vf
, info
->vsi_id
) ||
1141 (pf
->vsi
[info
->vsi_id
]->type
!= I40E_VSI_FCOE
)) {
1142 aq_ret
= I40E_ERR_PARAM
;
1146 if (info
->flags
& I40E_FLAG_VF_UNICAST_PROMISC
)
1148 aq_ret
= i40e_aq_set_vsi_unicast_promiscuous(hw
, info
->vsi_id
,
1153 if (info
->flags
& I40E_FLAG_VF_MULTICAST_PROMISC
)
1155 aq_ret
= i40e_aq_set_vsi_multicast_promiscuous(hw
, info
->vsi_id
,
1159 /* send the response to the vf */
1160 return i40e_vc_send_resp_to_vf(vf
,
1161 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
,
1166 * i40e_vc_config_queues_msg
1167 * @vf: pointer to the vf info
1168 * @msg: pointer to the msg buffer
1169 * @msglen: msg length
1171 * called from the vf to configure the rx/tx
1174 static int i40e_vc_config_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1176 struct i40e_virtchnl_vsi_queue_config_info
*qci
=
1177 (struct i40e_virtchnl_vsi_queue_config_info
*)msg
;
1178 struct i40e_virtchnl_queue_pair_info
*qpi
;
1179 u16 vsi_id
, vsi_queue_id
;
1180 i40e_status aq_ret
= 0;
1183 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1184 aq_ret
= I40E_ERR_PARAM
;
1188 vsi_id
= qci
->vsi_id
;
1189 if (!i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1190 aq_ret
= I40E_ERR_PARAM
;
1193 for (i
= 0; i
< qci
->num_queue_pairs
; i
++) {
1194 qpi
= &qci
->qpair
[i
];
1195 vsi_queue_id
= qpi
->txq
.queue_id
;
1196 if ((qpi
->txq
.vsi_id
!= vsi_id
) ||
1197 (qpi
->rxq
.vsi_id
!= vsi_id
) ||
1198 (qpi
->rxq
.queue_id
!= vsi_queue_id
) ||
1199 !i40e_vc_isvalid_queue_id(vf
, vsi_id
, vsi_queue_id
)) {
1200 aq_ret
= I40E_ERR_PARAM
;
1204 if (i40e_config_vsi_rx_queue(vf
, vsi_id
, vsi_queue_id
,
1206 i40e_config_vsi_tx_queue(vf
, vsi_id
, vsi_queue_id
,
1208 aq_ret
= I40E_ERR_PARAM
;
1214 /* send the response to the vf */
1215 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
,
1220 * i40e_vc_config_irq_map_msg
1221 * @vf: pointer to the vf info
1222 * @msg: pointer to the msg buffer
1223 * @msglen: msg length
1225 * called from the vf to configure the irq to
1228 static int i40e_vc_config_irq_map_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1230 struct i40e_virtchnl_irq_map_info
*irqmap_info
=
1231 (struct i40e_virtchnl_irq_map_info
*)msg
;
1232 struct i40e_virtchnl_vector_map
*map
;
1233 u16 vsi_id
, vsi_queue_id
, vector_id
;
1234 i40e_status aq_ret
= 0;
1235 unsigned long tempmap
;
1238 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1239 aq_ret
= I40E_ERR_PARAM
;
1243 for (i
= 0; i
< irqmap_info
->num_vectors
; i
++) {
1244 map
= &irqmap_info
->vecmap
[i
];
1246 vector_id
= map
->vector_id
;
1247 vsi_id
= map
->vsi_id
;
1248 /* validate msg params */
1249 if (!i40e_vc_isvalid_vector_id(vf
, vector_id
) ||
1250 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1251 aq_ret
= I40E_ERR_PARAM
;
1255 /* lookout for the invalid queue index */
1256 tempmap
= map
->rxq_map
;
1257 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
1258 if (!i40e_vc_isvalid_queue_id(vf
, vsi_id
,
1260 aq_ret
= I40E_ERR_PARAM
;
1265 tempmap
= map
->txq_map
;
1266 for_each_set_bit(vsi_queue_id
, &tempmap
, I40E_MAX_VSI_QP
) {
1267 if (!i40e_vc_isvalid_queue_id(vf
, vsi_id
,
1269 aq_ret
= I40E_ERR_PARAM
;
1274 i40e_config_irq_link_list(vf
, vsi_id
, map
);
1277 /* send the response to the vf */
1278 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
,
1283 * i40e_vc_enable_queues_msg
1284 * @vf: pointer to the vf info
1285 * @msg: pointer to the msg buffer
1286 * @msglen: msg length
1288 * called from the vf to enable all or specific queue(s)
1290 static int i40e_vc_enable_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1292 struct i40e_virtchnl_queue_select
*vqs
=
1293 (struct i40e_virtchnl_queue_select
*)msg
;
1294 struct i40e_pf
*pf
= vf
->pf
;
1295 u16 vsi_id
= vqs
->vsi_id
;
1296 i40e_status aq_ret
= 0;
1298 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1299 aq_ret
= I40E_ERR_PARAM
;
1303 if (!i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1304 aq_ret
= I40E_ERR_PARAM
;
1308 if ((0 == vqs
->rx_queues
) && (0 == vqs
->tx_queues
)) {
1309 aq_ret
= I40E_ERR_PARAM
;
1312 if (i40e_vsi_control_rings(pf
->vsi
[vsi_id
], true))
1313 aq_ret
= I40E_ERR_TIMEOUT
;
1315 /* send the response to the vf */
1316 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ENABLE_QUEUES
,
1321 * i40e_vc_disable_queues_msg
1322 * @vf: pointer to the vf info
1323 * @msg: pointer to the msg buffer
1324 * @msglen: msg length
1326 * called from the vf to disable all or specific
1329 static int i40e_vc_disable_queues_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1331 struct i40e_virtchnl_queue_select
*vqs
=
1332 (struct i40e_virtchnl_queue_select
*)msg
;
1333 struct i40e_pf
*pf
= vf
->pf
;
1334 u16 vsi_id
= vqs
->vsi_id
;
1335 i40e_status aq_ret
= 0;
1337 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1338 aq_ret
= I40E_ERR_PARAM
;
1342 if (!i40e_vc_isvalid_vsi_id(vf
, vqs
->vsi_id
)) {
1343 aq_ret
= I40E_ERR_PARAM
;
1347 if ((0 == vqs
->rx_queues
) && (0 == vqs
->tx_queues
)) {
1348 aq_ret
= I40E_ERR_PARAM
;
1351 if (i40e_vsi_control_rings(pf
->vsi
[vsi_id
], false))
1352 aq_ret
= I40E_ERR_TIMEOUT
;
1355 /* send the response to the vf */
1356 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DISABLE_QUEUES
,
1361 * i40e_vc_get_stats_msg
1362 * @vf: pointer to the vf info
1363 * @msg: pointer to the msg buffer
1364 * @msglen: msg length
1366 * called from the vf to get vsi stats
1368 static int i40e_vc_get_stats_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1370 struct i40e_virtchnl_queue_select
*vqs
=
1371 (struct i40e_virtchnl_queue_select
*)msg
;
1372 struct i40e_pf
*pf
= vf
->pf
;
1373 struct i40e_eth_stats stats
;
1374 i40e_status aq_ret
= 0;
1375 struct i40e_vsi
*vsi
;
1377 memset(&stats
, 0, sizeof(struct i40e_eth_stats
));
1379 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
)) {
1380 aq_ret
= I40E_ERR_PARAM
;
1384 if (!i40e_vc_isvalid_vsi_id(vf
, vqs
->vsi_id
)) {
1385 aq_ret
= I40E_ERR_PARAM
;
1389 vsi
= pf
->vsi
[vqs
->vsi_id
];
1391 aq_ret
= I40E_ERR_PARAM
;
1394 i40e_update_eth_stats(vsi
);
1395 stats
= vsi
->eth_stats
;
1398 /* send the response back to the vf */
1399 return i40e_vc_send_msg_to_vf(vf
, I40E_VIRTCHNL_OP_GET_STATS
, aq_ret
,
1400 (u8
*)&stats
, sizeof(stats
));
1404 * i40e_check_vf_permission
1405 * @vf: pointer to the vf info
1406 * @macaddr: pointer to the MAC Address being checked
1408 * Check if the VF has permission to add or delete unicast MAC address
1409 * filters and return error code -EPERM if not. Then check if the
1410 * address filter requested is broadcast or zero and if so return
1411 * an invalid MAC address error code.
1413 static inline int i40e_check_vf_permission(struct i40e_vf
*vf
, u8
*macaddr
)
1415 struct i40e_pf
*pf
= vf
->pf
;
1418 if (is_broadcast_ether_addr(macaddr
) ||
1419 is_zero_ether_addr(macaddr
)) {
1420 dev_err(&pf
->pdev
->dev
, "invalid VF MAC addr %pM\n", macaddr
);
1421 ret
= I40E_ERR_INVALID_MAC_ADDR
;
1422 } else if (vf
->pf_set_mac
&& !is_multicast_ether_addr(macaddr
) &&
1423 !ether_addr_equal(macaddr
, vf
->default_lan_addr
.addr
)) {
1424 /* If the host VMM administrator has set the VF MAC address
1425 * administratively via the ndo_set_vf_mac command then deny
1426 * permission to the VF to add or delete unicast MAC addresses.
1427 * The VF may request to set the MAC address filter already
1428 * assigned to it so do not return an error in that case.
1430 dev_err(&pf
->pdev
->dev
,
1431 "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
1438 * i40e_vc_add_mac_addr_msg
1439 * @vf: pointer to the vf info
1440 * @msg: pointer to the msg buffer
1441 * @msglen: msg length
1443 * add guest mac address filter
1445 static int i40e_vc_add_mac_addr_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1447 struct i40e_virtchnl_ether_addr_list
*al
=
1448 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1449 struct i40e_pf
*pf
= vf
->pf
;
1450 struct i40e_vsi
*vsi
= NULL
;
1451 u16 vsi_id
= al
->vsi_id
;
1452 i40e_status ret
= 0;
1455 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1456 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1457 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1458 ret
= I40E_ERR_PARAM
;
1462 for (i
= 0; i
< al
->num_elements
; i
++) {
1463 ret
= i40e_check_vf_permission(vf
, al
->list
[i
].addr
);
1467 vsi
= pf
->vsi
[vsi_id
];
1469 /* add new addresses to the list */
1470 for (i
= 0; i
< al
->num_elements
; i
++) {
1471 struct i40e_mac_filter
*f
;
1473 f
= i40e_find_mac(vsi
, al
->list
[i
].addr
, true, false);
1475 if (i40e_is_vsi_in_vlan(vsi
))
1476 f
= i40e_put_mac_in_vlan(vsi
, al
->list
[i
].addr
,
1479 f
= i40e_add_filter(vsi
, al
->list
[i
].addr
, -1,
1484 dev_err(&pf
->pdev
->dev
,
1485 "Unable to add VF MAC filter\n");
1486 ret
= I40E_ERR_PARAM
;
1491 /* program the updated filter list */
1492 if (i40e_sync_vsi_filters(vsi
))
1493 dev_err(&pf
->pdev
->dev
, "Unable to program VF MAC filters\n");
1496 /* send the response to the vf */
1497 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
,
1502 * i40e_vc_del_mac_addr_msg
1503 * @vf: pointer to the vf info
1504 * @msg: pointer to the msg buffer
1505 * @msglen: msg length
1507 * remove guest mac address filter
1509 static int i40e_vc_del_mac_addr_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1511 struct i40e_virtchnl_ether_addr_list
*al
=
1512 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1513 struct i40e_pf
*pf
= vf
->pf
;
1514 struct i40e_vsi
*vsi
= NULL
;
1515 u16 vsi_id
= al
->vsi_id
;
1516 i40e_status ret
= 0;
1519 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1520 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1521 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1522 ret
= I40E_ERR_PARAM
;
1526 for (i
= 0; i
< al
->num_elements
; i
++) {
1527 ret
= i40e_check_vf_permission(vf
, al
->list
[i
].addr
);
1531 vsi
= pf
->vsi
[vsi_id
];
1533 /* delete addresses from the list */
1534 for (i
= 0; i
< al
->num_elements
; i
++)
1535 i40e_del_filter(vsi
, al
->list
[i
].addr
,
1536 I40E_VLAN_ANY
, true, false);
1538 /* program the updated filter list */
1539 if (i40e_sync_vsi_filters(vsi
))
1540 dev_err(&pf
->pdev
->dev
, "Unable to program VF MAC filters\n");
1543 /* send the response to the vf */
1544 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
,
1549 * i40e_vc_add_vlan_msg
1550 * @vf: pointer to the vf info
1551 * @msg: pointer to the msg buffer
1552 * @msglen: msg length
1554 * program guest vlan id
1556 static int i40e_vc_add_vlan_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1558 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1559 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1560 struct i40e_pf
*pf
= vf
->pf
;
1561 struct i40e_vsi
*vsi
= NULL
;
1562 u16 vsi_id
= vfl
->vsi_id
;
1563 i40e_status aq_ret
= 0;
1566 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1567 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1568 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1569 aq_ret
= I40E_ERR_PARAM
;
1573 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1574 if (vfl
->vlan_id
[i
] > I40E_MAX_VLANID
) {
1575 aq_ret
= I40E_ERR_PARAM
;
1576 dev_err(&pf
->pdev
->dev
,
1577 "invalid VF VLAN id %d\n", vfl
->vlan_id
[i
]);
1581 vsi
= pf
->vsi
[vsi_id
];
1582 if (vsi
->info
.pvid
) {
1583 aq_ret
= I40E_ERR_PARAM
;
1587 i40e_vlan_stripping_enable(vsi
);
1588 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1589 /* add new VLAN filter */
1590 int ret
= i40e_vsi_add_vlan(vsi
, vfl
->vlan_id
[i
]);
1592 dev_err(&pf
->pdev
->dev
,
1593 "Unable to add VF vlan filter %d, error %d\n",
1594 vfl
->vlan_id
[i
], ret
);
1598 /* send the response to the vf */
1599 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_ADD_VLAN
, aq_ret
);
1603 * i40e_vc_remove_vlan_msg
1604 * @vf: pointer to the vf info
1605 * @msg: pointer to the msg buffer
1606 * @msglen: msg length
1608 * remove programmed guest vlan id
1610 static int i40e_vc_remove_vlan_msg(struct i40e_vf
*vf
, u8
*msg
, u16 msglen
)
1612 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1613 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1614 struct i40e_pf
*pf
= vf
->pf
;
1615 struct i40e_vsi
*vsi
= NULL
;
1616 u16 vsi_id
= vfl
->vsi_id
;
1617 i40e_status aq_ret
= 0;
1620 if (!test_bit(I40E_VF_STAT_ACTIVE
, &vf
->vf_states
) ||
1621 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE
, &vf
->vf_caps
) ||
1622 !i40e_vc_isvalid_vsi_id(vf
, vsi_id
)) {
1623 aq_ret
= I40E_ERR_PARAM
;
1627 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1628 if (vfl
->vlan_id
[i
] > I40E_MAX_VLANID
) {
1629 aq_ret
= I40E_ERR_PARAM
;
1634 vsi
= pf
->vsi
[vsi_id
];
1635 if (vsi
->info
.pvid
) {
1636 aq_ret
= I40E_ERR_PARAM
;
1640 for (i
= 0; i
< vfl
->num_elements
; i
++) {
1641 int ret
= i40e_vsi_kill_vlan(vsi
, vfl
->vlan_id
[i
]);
1643 dev_err(&pf
->pdev
->dev
,
1644 "Unable to delete VF vlan filter %d, error %d\n",
1645 vfl
->vlan_id
[i
], ret
);
1649 /* send the response to the vf */
1650 return i40e_vc_send_resp_to_vf(vf
, I40E_VIRTCHNL_OP_DEL_VLAN
, aq_ret
);
1654 * i40e_vc_validate_vf_msg
1655 * @vf: pointer to the vf info
1656 * @msg: pointer to the msg buffer
1657 * @msglen: msg length
1658 * @msghndl: msg handle
1662 static int i40e_vc_validate_vf_msg(struct i40e_vf
*vf
, u32 v_opcode
,
1663 u32 v_retval
, u8
*msg
, u16 msglen
)
1665 bool err_msg_format
= false;
1668 /* Check if VF is disabled. */
1669 if (test_bit(I40E_VF_STAT_DISABLED
, &vf
->vf_states
))
1670 return I40E_ERR_PARAM
;
1672 /* Validate message length. */
1674 case I40E_VIRTCHNL_OP_VERSION
:
1675 valid_len
= sizeof(struct i40e_virtchnl_version_info
);
1677 case I40E_VIRTCHNL_OP_RESET_VF
:
1678 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES
:
1681 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
:
1682 valid_len
= sizeof(struct i40e_virtchnl_txq_info
);
1684 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
:
1685 valid_len
= sizeof(struct i40e_virtchnl_rxq_info
);
1687 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
:
1688 valid_len
= sizeof(struct i40e_virtchnl_vsi_queue_config_info
);
1689 if (msglen
>= valid_len
) {
1690 struct i40e_virtchnl_vsi_queue_config_info
*vqc
=
1691 (struct i40e_virtchnl_vsi_queue_config_info
*)msg
;
1692 valid_len
+= (vqc
->num_queue_pairs
*
1694 i40e_virtchnl_queue_pair_info
));
1695 if (vqc
->num_queue_pairs
== 0)
1696 err_msg_format
= true;
1699 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
:
1700 valid_len
= sizeof(struct i40e_virtchnl_irq_map_info
);
1701 if (msglen
>= valid_len
) {
1702 struct i40e_virtchnl_irq_map_info
*vimi
=
1703 (struct i40e_virtchnl_irq_map_info
*)msg
;
1704 valid_len
+= (vimi
->num_vectors
*
1705 sizeof(struct i40e_virtchnl_vector_map
));
1706 if (vimi
->num_vectors
== 0)
1707 err_msg_format
= true;
1710 case I40E_VIRTCHNL_OP_ENABLE_QUEUES
:
1711 case I40E_VIRTCHNL_OP_DISABLE_QUEUES
:
1712 valid_len
= sizeof(struct i40e_virtchnl_queue_select
);
1714 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
:
1715 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
:
1716 valid_len
= sizeof(struct i40e_virtchnl_ether_addr_list
);
1717 if (msglen
>= valid_len
) {
1718 struct i40e_virtchnl_ether_addr_list
*veal
=
1719 (struct i40e_virtchnl_ether_addr_list
*)msg
;
1720 valid_len
+= veal
->num_elements
*
1721 sizeof(struct i40e_virtchnl_ether_addr
);
1722 if (veal
->num_elements
== 0)
1723 err_msg_format
= true;
1726 case I40E_VIRTCHNL_OP_ADD_VLAN
:
1727 case I40E_VIRTCHNL_OP_DEL_VLAN
:
1728 valid_len
= sizeof(struct i40e_virtchnl_vlan_filter_list
);
1729 if (msglen
>= valid_len
) {
1730 struct i40e_virtchnl_vlan_filter_list
*vfl
=
1731 (struct i40e_virtchnl_vlan_filter_list
*)msg
;
1732 valid_len
+= vfl
->num_elements
* sizeof(u16
);
1733 if (vfl
->num_elements
== 0)
1734 err_msg_format
= true;
1737 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
:
1738 valid_len
= sizeof(struct i40e_virtchnl_promisc_info
);
1740 case I40E_VIRTCHNL_OP_GET_STATS
:
1741 valid_len
= sizeof(struct i40e_virtchnl_queue_select
);
1743 /* These are always errors coming from the VF. */
1744 case I40E_VIRTCHNL_OP_EVENT
:
1745 case I40E_VIRTCHNL_OP_UNKNOWN
:
1750 /* few more checks */
1751 if ((valid_len
!= msglen
) || (err_msg_format
)) {
1752 i40e_vc_send_resp_to_vf(vf
, v_opcode
, I40E_ERR_PARAM
);
1760 * i40e_vc_process_vf_msg
1761 * @pf: pointer to the pf structure
1762 * @vf_id: source vf id
1763 * @msg: pointer to the msg buffer
1764 * @msglen: msg length
1765 * @msghndl: msg handle
1767 * called from the common aeq/arq handler to
1768 * process request from vf
1770 int i40e_vc_process_vf_msg(struct i40e_pf
*pf
, u16 vf_id
, u32 v_opcode
,
1771 u32 v_retval
, u8
*msg
, u16 msglen
)
1773 struct i40e_hw
*hw
= &pf
->hw
;
1774 int local_vf_id
= vf_id
- hw
->func_caps
.vf_base_id
;
1778 pf
->vf_aq_requests
++;
1779 if (local_vf_id
>= pf
->num_alloc_vfs
)
1781 vf
= &(pf
->vf
[local_vf_id
]);
1782 /* perform basic checks on the msg */
1783 ret
= i40e_vc_validate_vf_msg(vf
, v_opcode
, v_retval
, msg
, msglen
);
1786 dev_err(&pf
->pdev
->dev
, "Invalid message from vf %d, opcode %d, len %d\n",
1787 local_vf_id
, v_opcode
, msglen
);
1790 wr32(hw
, I40E_VFGEN_RSTAT1(local_vf_id
), I40E_VFR_VFACTIVE
);
1792 case I40E_VIRTCHNL_OP_VERSION
:
1793 ret
= i40e_vc_get_version_msg(vf
);
1795 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES
:
1796 ret
= i40e_vc_get_vf_resources_msg(vf
);
1798 case I40E_VIRTCHNL_OP_RESET_VF
:
1799 i40e_vc_reset_vf_msg(vf
);
1802 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
:
1803 ret
= i40e_vc_config_promiscuous_mode_msg(vf
, msg
, msglen
);
1805 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
:
1806 ret
= i40e_vc_config_queues_msg(vf
, msg
, msglen
);
1808 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
:
1809 ret
= i40e_vc_config_irq_map_msg(vf
, msg
, msglen
);
1811 case I40E_VIRTCHNL_OP_ENABLE_QUEUES
:
1812 ret
= i40e_vc_enable_queues_msg(vf
, msg
, msglen
);
1814 case I40E_VIRTCHNL_OP_DISABLE_QUEUES
:
1815 ret
= i40e_vc_disable_queues_msg(vf
, msg
, msglen
);
1817 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
:
1818 ret
= i40e_vc_add_mac_addr_msg(vf
, msg
, msglen
);
1820 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
:
1821 ret
= i40e_vc_del_mac_addr_msg(vf
, msg
, msglen
);
1823 case I40E_VIRTCHNL_OP_ADD_VLAN
:
1824 ret
= i40e_vc_add_vlan_msg(vf
, msg
, msglen
);
1826 case I40E_VIRTCHNL_OP_DEL_VLAN
:
1827 ret
= i40e_vc_remove_vlan_msg(vf
, msg
, msglen
);
1829 case I40E_VIRTCHNL_OP_GET_STATS
:
1830 ret
= i40e_vc_get_stats_msg(vf
, msg
, msglen
);
1832 case I40E_VIRTCHNL_OP_UNKNOWN
:
1834 dev_err(&pf
->pdev
->dev
, "Unsupported opcode %d from vf %d\n",
1835 v_opcode
, local_vf_id
);
1836 ret
= i40e_vc_send_resp_to_vf(vf
, v_opcode
,
1837 I40E_ERR_NOT_IMPLEMENTED
);
1845 * i40e_vc_process_vflr_event
1846 * @pf: pointer to the pf structure
1848 * called from the vlfr irq handler to
1849 * free up vf resources and state variables
1851 int i40e_vc_process_vflr_event(struct i40e_pf
*pf
)
1853 u32 reg
, reg_idx
, bit_idx
, vf_id
;
1854 struct i40e_hw
*hw
= &pf
->hw
;
1857 if (!test_bit(__I40E_VFLR_EVENT_PENDING
, &pf
->state
))
1860 clear_bit(__I40E_VFLR_EVENT_PENDING
, &pf
->state
);
1861 for (vf_id
= 0; vf_id
< pf
->num_alloc_vfs
; vf_id
++) {
1862 reg_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) / 32;
1863 bit_idx
= (hw
->func_caps
.vf_base_id
+ vf_id
) % 32;
1864 /* read GLGEN_VFLRSTAT register to find out the flr vfs */
1865 vf
= &pf
->vf
[vf_id
];
1866 reg
= rd32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
));
1867 if (reg
& (1 << bit_idx
)) {
1868 /* clear the bit in GLGEN_VFLRSTAT */
1869 wr32(hw
, I40E_GLGEN_VFLRSTAT(reg_idx
), (1 << bit_idx
));
1871 i40e_reset_vf(vf
, true);
1875 /* re-enable vflr interrupt cause */
1876 reg
= rd32(hw
, I40E_PFINT_ICR0_ENA
);
1877 reg
|= I40E_PFINT_ICR0_ENA_VFLR_MASK
;
1878 wr32(hw
, I40E_PFINT_ICR0_ENA
, reg
);
1885 * i40e_vc_vf_broadcast
1886 * @pf: pointer to the pf structure
1887 * @opcode: operation code
1888 * @retval: return value
1889 * @msg: pointer to the msg buffer
1890 * @msglen: msg length
1892 * send a message to all VFs on a given PF
1894 static void i40e_vc_vf_broadcast(struct i40e_pf
*pf
,
1895 enum i40e_virtchnl_ops v_opcode
,
1896 i40e_status v_retval
, u8
*msg
,
1899 struct i40e_hw
*hw
= &pf
->hw
;
1900 struct i40e_vf
*vf
= pf
->vf
;
1903 for (i
= 0; i
< pf
->num_alloc_vfs
; i
++) {
1904 /* Ignore return value on purpose - a given VF may fail, but
1905 * we need to keep going and send to all of them
1907 i40e_aq_send_msg_to_vf(hw
, vf
->vf_id
, v_opcode
, v_retval
,
1914 * i40e_vc_notify_link_state
1915 * @pf: pointer to the pf structure
1917 * send a link status message to all VFs on a given PF
1919 void i40e_vc_notify_link_state(struct i40e_pf
*pf
)
1921 struct i40e_virtchnl_pf_event pfe
;
1923 pfe
.event
= I40E_VIRTCHNL_EVENT_LINK_CHANGE
;
1924 pfe
.severity
= I40E_PF_EVENT_SEVERITY_INFO
;
1925 pfe
.event_data
.link_event
.link_status
=
1926 pf
->hw
.phy
.link_info
.link_info
& I40E_AQ_LINK_UP
;
1927 pfe
.event_data
.link_event
.link_speed
= pf
->hw
.phy
.link_info
.link_speed
;
1929 i40e_vc_vf_broadcast(pf
, I40E_VIRTCHNL_OP_EVENT
, I40E_SUCCESS
,
1930 (u8
*)&pfe
, sizeof(struct i40e_virtchnl_pf_event
));
1934 * i40e_vc_notify_reset
1935 * @pf: pointer to the pf structure
1937 * indicate a pending reset to all VFs on a given PF
1939 void i40e_vc_notify_reset(struct i40e_pf
*pf
)
1941 struct i40e_virtchnl_pf_event pfe
;
1943 pfe
.event
= I40E_VIRTCHNL_EVENT_RESET_IMPENDING
;
1944 pfe
.severity
= I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM
;
1945 i40e_vc_vf_broadcast(pf
, I40E_VIRTCHNL_OP_EVENT
, I40E_SUCCESS
,
1946 (u8
*)&pfe
, sizeof(struct i40e_virtchnl_pf_event
));
1950 * i40e_vc_notify_vf_reset
1951 * @vf: pointer to the vf structure
1953 * indicate a pending reset to the given VF
1955 void i40e_vc_notify_vf_reset(struct i40e_vf
*vf
)
1957 struct i40e_virtchnl_pf_event pfe
;
1959 pfe
.event
= I40E_VIRTCHNL_EVENT_RESET_IMPENDING
;
1960 pfe
.severity
= I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM
;
1961 i40e_aq_send_msg_to_vf(&vf
->pf
->hw
, vf
->vf_id
, I40E_VIRTCHNL_OP_EVENT
,
1962 I40E_SUCCESS
, (u8
*)&pfe
,
1963 sizeof(struct i40e_virtchnl_pf_event
), NULL
);
1967 * i40e_ndo_set_vf_mac
1968 * @netdev: network interface device structure
1969 * @vf_id: vf identifier
1972 * program vf mac address
1974 int i40e_ndo_set_vf_mac(struct net_device
*netdev
, int vf_id
, u8
*mac
)
1976 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
1977 struct i40e_vsi
*vsi
= np
->vsi
;
1978 struct i40e_pf
*pf
= vsi
->back
;
1979 struct i40e_mac_filter
*f
;
1983 /* validate the request */
1984 if (vf_id
>= pf
->num_alloc_vfs
) {
1985 dev_err(&pf
->pdev
->dev
,
1986 "Invalid VF Identifier %d\n", vf_id
);
1991 vf
= &(pf
->vf
[vf_id
]);
1992 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
1993 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
1994 dev_err(&pf
->pdev
->dev
,
1995 "Uninitialized VF %d\n", vf_id
);
2000 if (!is_valid_ether_addr(mac
)) {
2001 dev_err(&pf
->pdev
->dev
,
2002 "Invalid VF ethernet address\n");
2007 /* delete the temporary mac address */
2008 i40e_del_filter(vsi
, vf
->default_lan_addr
.addr
, 0, true, false);
2010 /* add the new mac address */
2011 f
= i40e_add_filter(vsi
, mac
, 0, true, false);
2013 dev_err(&pf
->pdev
->dev
,
2014 "Unable to add VF ucast filter\n");
2019 dev_info(&pf
->pdev
->dev
, "Setting MAC %pM on VF %d\n", mac
, vf_id
);
2020 /* program mac filter */
2021 if (i40e_sync_vsi_filters(vsi
)) {
2022 dev_err(&pf
->pdev
->dev
, "Unable to program ucast filters\n");
2026 memcpy(vf
->default_lan_addr
.addr
, mac
, ETH_ALEN
);
2027 vf
->pf_set_mac
= true;
2028 dev_info(&pf
->pdev
->dev
, "Reload the VF driver to make this change effective.\n");
2036 * i40e_ndo_set_vf_port_vlan
2037 * @netdev: network interface device structure
2038 * @vf_id: vf identifier
2039 * @vlan_id: mac address
2040 * @qos: priority setting
2042 * program vf vlan id and/or qos
2044 int i40e_ndo_set_vf_port_vlan(struct net_device
*netdev
,
2045 int vf_id
, u16 vlan_id
, u8 qos
)
2047 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2048 struct i40e_pf
*pf
= np
->vsi
->back
;
2049 struct i40e_vsi
*vsi
;
2053 /* validate the request */
2054 if (vf_id
>= pf
->num_alloc_vfs
) {
2055 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d\n", vf_id
);
2060 if ((vlan_id
> I40E_MAX_VLANID
) || (qos
> 7)) {
2061 dev_err(&pf
->pdev
->dev
, "Invalid VF Parameters\n");
2066 vf
= &(pf
->vf
[vf_id
]);
2067 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
2068 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
2069 dev_err(&pf
->pdev
->dev
, "Uninitialized VF %d\n", vf_id
);
2074 if (vsi
->info
.pvid
) {
2076 ret
= i40e_vsi_kill_vlan(vsi
, (le16_to_cpu(vsi
->info
.pvid
) &
2079 dev_info(&vsi
->back
->pdev
->dev
,
2080 "remove VLAN failed, ret=%d, aq_err=%d\n",
2081 ret
, pf
->hw
.aq
.asq_last_status
);
2085 ret
= i40e_vsi_add_pvid(vsi
,
2086 vlan_id
| (qos
<< I40E_VLAN_PRIORITY_SHIFT
));
2088 i40e_vsi_remove_pvid(vsi
);
2091 dev_info(&pf
->pdev
->dev
, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2092 vlan_id
, qos
, vf_id
);
2094 /* add new VLAN filter */
2095 ret
= i40e_vsi_add_vlan(vsi
, vlan_id
);
2097 dev_info(&vsi
->back
->pdev
->dev
,
2098 "add VF VLAN failed, ret=%d aq_err=%d\n", ret
,
2099 vsi
->back
->hw
.aq
.asq_last_status
);
2105 dev_err(&pf
->pdev
->dev
, "Unable to update VF vsi context\n");
2108 /* The Port VLAN needs to be saved across resets the same as the
2109 * default LAN MAC address.
2111 vf
->port_vlan_id
= le16_to_cpu(vsi
->info
.pvid
);
2119 * i40e_ndo_set_vf_bw
2120 * @netdev: network interface device structure
2121 * @vf_id: vf identifier
2124 * configure vf tx rate
2126 int i40e_ndo_set_vf_bw(struct net_device
*netdev
, int vf_id
, int tx_rate
)
2132 * i40e_ndo_get_vf_config
2133 * @netdev: network interface device structure
2134 * @vf_id: vf identifier
2135 * @ivi: vf configuration structure
2137 * return vf configuration
2139 int i40e_ndo_get_vf_config(struct net_device
*netdev
,
2140 int vf_id
, struct ifla_vf_info
*ivi
)
2142 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
2143 struct i40e_vsi
*vsi
= np
->vsi
;
2144 struct i40e_pf
*pf
= vsi
->back
;
2148 /* validate the request */
2149 if (vf_id
>= pf
->num_alloc_vfs
) {
2150 dev_err(&pf
->pdev
->dev
, "Invalid VF Identifier %d\n", vf_id
);
2155 vf
= &(pf
->vf
[vf_id
]);
2156 /* first vsi is always the LAN vsi */
2157 vsi
= pf
->vsi
[vf
->lan_vsi_index
];
2158 if (!test_bit(I40E_VF_STAT_INIT
, &vf
->vf_states
)) {
2159 dev_err(&pf
->pdev
->dev
, "Uninitialized VF %d\n", vf_id
);
2166 memcpy(&ivi
->mac
, vf
->default_lan_addr
.addr
, ETH_ALEN
);
2169 ivi
->vlan
= le16_to_cpu(vsi
->info
.pvid
) & I40E_VLAN_MASK
;
2170 ivi
->qos
= (le16_to_cpu(vsi
->info
.pvid
) & I40E_PRIORITY_MASK
) >>
2171 I40E_VLAN_PRIORITY_SHIFT
;