i40e: Fix GPL header
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c (deliverable/linux.git)
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/***********************misc routines*****************************/

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the vf info
 * @vsi_id: vf relative vsi id
 *
 * check for the valid vsi id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
{
	struct i40e_pf *pf = vf->pf;

	return pf->vsi[vsi_id]->vf_id == vf->vf_id;
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the vf info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;

	return qid < pf->vsi[vsi_id]->num_queue_pairs;
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the vf info
 * @vector_id: vf relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue id
 *
 * return pf relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vsi_idx];
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

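	/* A VSI's queues are mapped either through a per-queue table
	 * (noncontiguous) or as a contiguous range starting at
	 * queue_mapping[0]; both layouts are handled below.
	 */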
	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			  (pf->hw.func_caps.num_msix_vectors_vf
			   * vf->vf_id) + (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
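	/* Build a bitmap with I40E_VIRTCHNL_SUPPORTED_QTYPES bits per queue:
	 * the even bit marks an Rx queue on this vector, the odd bit the
	 * matching Tx queue.
	 */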
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				 vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
				 + 1));
	}

	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

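		/* Program the current queue's cause-control register; its
		 * NEXTQ fields point at the queue found above, or at
		 * END_OF_LIST to terminate the chain.
		 */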
		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
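	/* the HMC context takes the ring base in 128-byte units and the
	 * ring length as a descriptor count
	 */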
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the vf info
 * @type: type of VSI to allocate
 *
 * alloc vf vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for vf %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		vf->lan_vsi_index = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		dev_info(&pf->pdev->dev,
			 "LAN VSI index %d, VSI id %d\n",
			 vsi->idx, vsi->id);
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
				    vf->port_vlan_id, true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF MAC addr\n");
		f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
				    true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
	}

	if (!f) {
		dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
		ret = -ENOMEM;
		goto error_alloc_vsi_res;
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		goto error_alloc_vsi_res;
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the vf info
 *
 * enable vf mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
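	/* Each VSILAN_QTABLE register packs two 16-bit queue ids, and 0x7FF
	 * in a half marks that slot unused.
	 */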
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the vf info
 *
 * disable vf mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the vf info
 *
 * free vf resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_index) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
		vf->lan_vsi_index = 0;
		vf->lan_vsi_id = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						       + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						      + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the vf info
 *
 * allocate vf resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * vf req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* vf is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the vf structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

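	/* Read the VF's PCIe Device Status register through the PF's
	 * indirect config-space access registers (CIAA/CIAD) and poll until
	 * the Transactions Pending bit clears.
	 */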
	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * i40e_reset_vf
 * @vf: pointer to the vf structure
 * @flr: VFLR was issued or not
 *
 * reset the vf
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	int i;
	u32 reg;

	/* warn the VF */
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset vf using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 100; i++) {
		/* vf reset requires driver to first reset the
		 * vf & then poll the status register to make sure
		 * that the requested op was completed
		 * successfully
		 */
		udelay(10);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* On initial reset, we won't have any queues */
	if (vf->lan_vsi_index == 0)
		goto complete_reset;

	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
complete_reset:
	/* reallocate vf resources to reset the VSI state */
	i40e_free_vf_res(vf);
	mdelay(10);
	i40e_alloc_vf_res(vf);
	i40e_enable_vf_mappings(vf);

	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
	i40e_flush(hw);
}

/**
 * i40e_vfs_are_assigned
 * @pf: pointer to the pf structure
 *
 * Determine if any VFs are assigned to VMs
 **/
static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
{
	struct pci_dev *pdev = pf->pdev;
	struct pci_dev *vfdev;

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID, NULL);
	while (vfdev) {
		/* if we don't own it we don't care */
		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
			/* if it is assigned we cannot release it */
			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}

		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				       I40E_VF_DEVICE_ID,
				       vfdev);
	}

	return false;
}
#ifdef CONFIG_PCI_IOV

/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the pf structure
 *
 * enable switch loop back or die - no point in a return value
 **/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int aq_ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
			 __func__, aq_ret, pf->hw.aq.asq_last_status);
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s: update vsi switch failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}
#endif

/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the pf structure
 *
 * disable switch loop back or die - no point in a return value
 **/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int aq_ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s couldn't get pf vsi config, err %d, aq_err %d\n",
			 __func__, aq_ret, pf->hw.aq.asq_last_status);
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "%s: update vsi switch failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * free vf resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	mdelay(10); /* let any messages in transit get finished up */
	/* free up vf resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	if (!i40e_vfs_are_assigned(pf)) {
		pci_disable_sriov(pf->pdev);
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
		}
		i40e_disable_pf_switch_lb(pf);
	} else {
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");
	}

	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the pf structure
 * @num_alloc_vfs: number of vfs to allocate
 *
 * allocate vf resources
 **/
static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"pci_enable_sriov failed with error %d!\n", ret);
		pf->num_alloc_vfs = 0;
		goto err_iov;
	}

	/* allocate memory */
	vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		/* vf resources get allocated during reset */
		i40e_reset_vf(&vfs[i], false);

		/* enable vf vplan_qtable mappings */
		i40e_enable_vf_mappings(&vfs[i]);
	}
	pf->vf = vfs;
	pf->num_alloc_vfs = num_alloc_vfs;

	i40e_enable_pf_switch_lb(pf);
err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs)
		return i40e_pci_sriov_enable(pdev, num_vfs);

	i40e_free_vfs(pf);
	return 0;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to vf
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
	i40e_status aq_ret;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
			v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
	}

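	/* the admin queue addresses VFs by absolute id, hence true_vf_id */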
	aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the vf info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to vf
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_version_info info = {
		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
	};

	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct
					     i40e_virtchnl_version_info));
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

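	/* the reply is variable length: a vf_resource header followed by
	 * one vsi_resource entry per VSI
	 */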
	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
		    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to reset itself,
 * unlike other virtchnl messages, pf driver
 * doesn't send the response back to the vf
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the promiscuous mode of
 * vf vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
	    (struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool allmulti = false;
	bool promisc = false;
	i40e_status aq_ret;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		promisc = true;
	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
						     promisc, NULL);
	if (aq_ret)
		goto error_param;

	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
						       allmulti, NULL);

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
	    (struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
		aq_ret = I40E_ERR_TIMEOUT;
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
		aq_ret = I40E_ERR_TIMEOUT;

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vqs->vsi_id];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the vf */
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/**
 * i40e_check_vf_permission
 * @vf: pointer to the vf info
 * @macaddr: pointer to the MAC Address being checked
 *
 * Check if the VF has permission to add or delete unicast MAC address
 * filters and return error code -EPERM if not.  Then check if the
 * address filter requested is broadcast or zero and if so return
 * an invalid MAC address error code.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
{
	struct i40e_pf *pf = vf->pf;
	int ret = 0;

	if (is_broadcast_ether_addr(macaddr) ||
	    is_zero_ether_addr(macaddr)) {
		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
		ret = I40E_ERR_INVALID_MAC_ADDR;
	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		dev_err(&pf->pdev->dev,
			"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
		ret = -EPERM;
	}
	return ret;
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vsi_id];

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add VF MAC filter\n");
			ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       ret);
}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vsi_id];

	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		i40e_del_filter(vsi, al->list[i].addr,
				I40E_VLAN_ANY, true, false);

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				       ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to delete VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

/**
 * i40e_vc_validate_vf_msg
 * @vf: pointer to the vf info
 * @v_opcode: virtchnl operation code
 * @v_retval: return value carried in the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg
 **/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
				   u32 v_retval, u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* Validate message length. */
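	/* valid_len starts as the fixed-size header for each opcode;
	 * variable-length messages add their element arrays once the header
	 * is known to fit.
	 */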
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return -EPERM;
	}
	/* few more checks */
	if ((valid_len != msglen) || (err_msg_format)) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		return -EINVAL;
	} else {
		return 0;
	}
}

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the pf structure
 * @vf_id: source vf id
 * @v_opcode: operation code sent by the vf
 * @v_retval: return value carried in the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from vf
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	int local_vf_id = vf_id - hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
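	/* vf_id from the admin queue is absolute; pf->vf[] is indexed by the
	 * VF-relative id computed above
	 */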
	if (local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);

	if (ret) {
		dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		return ret;
	}
	wr32(hw, I40E_VFGEN_RSTAT1(local_vf_id), I40E_VFR_VFACTIVE);
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * called from the vflr irq handler to
 * free up vf resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
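		/* each GLGEN_VFLRSTAT register tracks 32 VFs, indexed by
		 * absolute VF id
		 */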
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

			i40e_reset_vf(vf, true);
		}
	}

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the pf structure
 * @opcode: operation code
 * @retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum i40e_virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
		vf++;
	}
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the pf structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
	pfe.event_data.link_event.link_status =
	    pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
	pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;

	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the pf structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the vf structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
			       I40E_SUCCESS, (u8 *)&pfe,
			       sizeof(struct i40e_virtchnl_pf_event), NULL);
}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @mac: mac address
 *
 * program vf mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev,
			"Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid VF ethernet address\n");
		ret = -EINVAL;
		goto error_param;
	}

	/* delete the temporary mac address */
	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);

	/* add the new mac address */
	f = i40e_add_filter(vsi, mac, 0, true, false);
	if (!f) {
		dev_err(&pf->pdev->dev,
			"Unable to add VF ucast filter\n");
		ret = -ENOMEM;
		goto error_param;
	}

	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
	vf->pf_set_mac = true;
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
	ret = 0;

error_param:
	return ret;
}

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @vlan_id: vlan id to be set on the vf
 * @qos: priority setting
 *
 * program vf vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vsi->info.pvid) {
		/* kill old VLAN */
		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
					       VLAN_VID_MASK));
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "remove VLAN failed, ret=%d, aq_err=%d\n",
				 ret, pf->hw.aq.asq_last_status);
		}
	}
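	/* a port VLAN is stored in the VSI as pvid = vlan | (qos << shift) */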
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi,
				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
	else
		i40e_vsi_remove_pvid(vsi);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter */
		ret = i40e_vsi_add_vlan(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			goto error_pvid;
		}
	}

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}
	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	return ret;
}

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * configure vf tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
	return -EOPNOTSUPP;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @ivi: vf configuration structure
 *
 * return vf configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	ivi->vf = vf_id;

	memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);

	ivi->tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	ret = 0;

error_param:
	return ret;
}