/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2010-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "io.h"
#include "mcdi.h"
#include "filter.h"
#include "mcdi_pcol.h"
#include "regs.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)

/* Maximum number of RX queues supported */
#define VF_MAX_RX_QUEUES 63
/**
 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
 * @VF_TX_FILTER_OFF: Disabled
 * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only
 *	2 TX queues allowed per VF.
 * @VF_TX_FILTER_ON: Enabled
 */
enum efx_vf_tx_filter_mode {
	VF_TX_FILTER_OFF,
	VF_TX_FILTER_AUTO,
	VF_TX_FILTER_ON,
};
/**
 * struct efx_vf - Back-end resource and protocol state for a PCI VF
 * @efx: The Efx NIC owning this VF
 * @pci_rid: The PCI requester ID for this VF
 * @pci_name: The PCI name (formatted address) of this VF
 * @index: Index of VF within its port and PF.
 * @req: VFDI incoming request work item. Incoming USR_EV events are received
 *	by the NAPI handler, but must be handled by executing MCDI requests
 *	inside a work item.
 * @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
 * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
 * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
 * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
 *	@status_lock
 * @busy: VFDI request queued to be processed or being processed. Receiving
 *	a VFDI request when @busy is set is an error condition.
 * @buf: Incoming VFDI requests are DMA from the VF into this buffer.
 * @buftbl_base: Buffer table entries for this VF start at this index.
 * @rx_filtering: Receive filtering has been requested by the VF driver.
 * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
 * @rx_filter_qid: VF relative qid for RX filter requested by VF.
 * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
 * @tx_filter_mode: Transmit MAC filtering mode.
 * @tx_filter_id: Transmit MAC filter ID.
 * @addr: The MAC address and outer vlan tag of the VF.
 * @status_addr: VF DMA address of page for &struct vfdi_status updates.
 * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
 *	@peer_page_addrs and @peer_page_count from simultaneous
 *	updates by the VM and consumption by
 *	efx_sriov_update_vf_addr()
 * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
 * @peer_page_count: Number of entries in @peer_page_addrs.
 * @evq0_addrs: Array of guest pages backing evq0.
 * @evq0_count: Number of entries in @evq0_addrs.
 * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
 *	to wait for flush completions.
 * @txq_lock: Mutex for TX queue allocation.
 * @txq_mask: Mask of initialized transmit queues.
 * @txq_count: Number of initialized transmit queues.
 * @rxq_mask: Mask of initialized receive queues.
 * @rxq_count: Number of initialized receive queues.
 * @rxq_retry_mask: Mask of receive queues that need to be flushed again
 *	due to flush failure.
 * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
 * @reset_work: Work item to schedule a VF reset.
 */
struct efx_vf {
	struct efx_nic *efx;
	unsigned int pci_rid;
	char pci_name[13]; /* dddd:bb:dd.f */
	unsigned int index;
	struct work_struct req;
	u64 req_addr;
	int req_type;
	unsigned req_seqno;
	unsigned msg_seqno;
	bool busy;
	struct efx_buffer buf;
	unsigned buftbl_base;
	bool rx_filtering;
	enum efx_filter_flags rx_filter_flags;
	unsigned rx_filter_qid;
	int rx_filter_id;
	enum efx_vf_tx_filter_mode tx_filter_mode;
	int tx_filter_id;
	struct vfdi_endpoint addr;
	u64 status_addr;
	struct mutex status_lock;
	u64 *peer_page_addrs;
	unsigned peer_page_count;
	u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) /
		       EFX_BUF_SIZE];
	unsigned evq0_count;
	wait_queue_head_t flush_waitq;
	struct mutex txq_lock;
	unsigned long txq_mask[VI_MASK_LENGTH];
	unsigned txq_count;
	unsigned long rxq_mask[VI_MASK_LENGTH];
	unsigned rxq_count;
	unsigned long rxq_retry_mask[VI_MASK_LENGTH];
	atomic_t rxq_retry_count;
	struct work_struct reset_work;
};
struct efx_memcpy_req {
	unsigned int from_rid;
	void *from_buf;
	u64 from_addr;
	unsigned int to_rid;
	u64 to_addr;
	unsigned length;
};
/**
 * struct efx_local_addr - A MAC address on the vswitch without a VF.
 *
 * Siena does not have a switch, so VFs can't transmit data to each
 * other. Instead the VFs must be made aware of the local addresses
 * on the vswitch, so that they can arrange for an alternative
 * software datapath to be used.
 *
 * @link: List head for insertion into efx->local_addr_list.
 * @addr: Ethernet address
 */
struct efx_local_addr {
	struct list_head link;
	u8 addr[ETH_ALEN];
};
/**
 * struct efx_endpoint_page - Page of vfdi_endpoint structures
 *
 * @link: List head for insertion into efx->local_page_list.
 * @ptr: Pointer to page.
 * @addr: DMA address of page.
 */
struct efx_endpoint_page {
	struct list_head link;
	void *ptr;
	dma_addr_t addr;
};
/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */
#define EFX_BUFTBL_TXQ_BASE(_vf, _qid)					\
	((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
#define EFX_BUFTBL_RXQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
#define EFX_BUFTBL_EVQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
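
/* A worked example of the layout above -- a sketch with assumed sizes
 * (EFX_MAX_DMAQ_SIZE = 4096 descriptors, sizeof(efx_qword_t) = 8,
 * EFX_BUF_SIZE = 4096), so each DMA queue spans 4096 * 8 / 4096 = 8
 * buffer table entries. For VI 0 of a VF whose buftbl_base is B:
 *
 *	EFX_BUFTBL_TXQ_BASE(vf, 0) = B		(txq0)
 *	EFX_BUFTBL_RXQ_BASE(vf, 0) = B + 8	(rxq0)
 *	EFX_BUFTBL_EVQ_BASE(vf, 0) = B + 16	(evq0)
 *
 * with txq1/rxq1/evq1 starting EFX_VF_BUFTBL_PER_VI entries further on.
 */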
#define EFX_FIELD_MASK(_field)			\
	((1 << _field ## _WIDTH) - 1)
/* VFs can only use this many transmit channels */
static unsigned int vf_max_tx_channels = 2;
module_param(vf_max_tx_channels, uint, 0444);
MODULE_PARM_DESC(vf_max_tx_channels,
		 "Limit the number of TX channels VFs can use");

static int max_vfs = -1;
module_param(max_vfs, int, 0444);
MODULE_PARM_DESC(max_vfs,
		 "Reduce the number of VFs initialized by the driver");

/* Workqueue used by VFDI communication. We can't use the global
 * workqueue because it may be running the VF driver's probe()
 * routine, which will be blocked there waiting for a VFDI response.
 */
static struct workqueue_struct *vfdi_workqueue;
static unsigned abs_index(struct efx_vf *vf, unsigned index)
{
	return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}
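
/* Worked example (values hypothetical, assuming EFX_VI_BASE = 128 and a
 * VI scale giving efx_vf_size() = 4): queue 1 of VF 2 maps to absolute
 * VI 128 + 2 * 4 + 1 = 137.
 */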
static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
			 unsigned *vi_scale_out, unsigned *vf_total_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
	unsigned vi_scale, vf_total;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);

	rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
			  outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_SRIOV_OUT_LEN)
		return -EIO;

	vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
	vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
	if (vi_scale > EFX_VI_SCALE_MAX)
		return -EOPNOTSUPP;

	if (vi_scale_out)
		*vi_scale_out = vi_scale;
	if (vf_total_out)
		*vf_total_out = vf_total;

	return 0;
}
static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
{
	efx_oword_t reg;

	EFX_POPULATE_OWORD_2(reg,
			     FRF_CZ_USREV_DIS, enabled ? 0 : 1,
			     FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
	efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
}
static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
			    unsigned int count)
{
	unsigned int used;
	u8 *inbuf, *record;
	u32 from_rid, from_hi, from_lo;
	int rc;

	mb();	/* Finish writing source/reading dest before DMA starts */

	used = MC_CMD_MEMCPY_IN_LEN(count);
	if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX_V1))
		return -ENOBUFS;

	/* Allocate room for the largest request */
	inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX_V1, GFP_KERNEL);
	if (inbuf == NULL)
		return -ENOMEM;

	record = inbuf;
	MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
	while (count-- > 0) {
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
			       req->to_rid);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
			       (u32)req->to_addr);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
			       (u32)(req->to_addr >> 32));
		if (req->from_buf == NULL) {
			from_rid = req->from_rid;
			from_lo = (u32)req->from_addr;
			from_hi = (u32)(req->from_addr >> 32);
		} else {
			if (WARN_ON(used + req->length >
				    MCDI_CTL_SDU_LEN_MAX_V1)) {
				rc = -ENOBUFS;
				goto out;
			}

			from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
			from_lo = used;
			from_hi = 0;
			memcpy(inbuf + used, req->from_buf, req->length);
			used += req->length;
		}

		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
			       from_lo);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
			       from_hi);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
			       req->length);

		++req;
		record += MC_CMD_MEMCPY_IN_RECORD_LEN;
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
	kfree(inbuf);

	mb();	/* Don't write source/read dest before DMA is complete */

	return rc;
}
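
/* Usage note: each efx_memcpy_req names either a DMA source
 * (from_buf == NULL, with from_rid/from_addr describing the source in
 * PCIe space) or an inline source (from_buf != NULL, in which case the
 * bytes are appended to the MCDI request itself, from_rid becomes
 * MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE and from_lo is the offset of
 * the inline data). The two-element copy[] built in efx_sriov_vfdi()
 * below is a typical inline example.
 */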
/* The TX filter is entirely controlled by this driver, and is modified
 * underneath the feet of the VF
 */
static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	if (vf->tx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->tx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n",
			  vf->pci_name, vf->tx_filter_id);
		vf->tx_filter_id = -1;
	}

	if (is_zero_ether_addr(vf->addr.mac_addr))
		return;

	/* Turn on TX filtering automatically if not explicitly
	 * enabled or disabled.
	 */
	if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2)
		vf->tx_filter_mode = VF_TX_FILTER_ON;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_tx(&filter, abs_index(vf, 0));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to migrate tx filter for vf %s\n",
			   vf->pci_name);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n",
			  vf->pci_name, rc);
		vf->tx_filter_id = rc;
	}
}
/* The RX filter is managed here on behalf of the VF driver */
static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	if (vf->rx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->rx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n",
			  vf->pci_name, vf->rx_filter_id);
		vf->rx_filter_id = -1;
	}

	if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr))
		return;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED,
			   vf->rx_filter_flags,
			   abs_index(vf, vf->rx_filter_qid));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to insert rx filter for vf %s\n",
			   vf->pci_name);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n",
			  vf->pci_name, rc);
		vf->rx_filter_id = rc;
	}
}
static void __efx_sriov_update_vf_addr(struct efx_vf *vf)
{
	efx_sriov_reset_tx_filter(vf);
	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &vf->efx->peer_work);
}
/* Push the peer list to this VF. The caller must hold status_lock to interlock
 * with VFDI requests, and they must be serialised against manipulation of
 * local_page_list, either by acquiring local_lock or by running from
 * efx_sriov_peer_work()
 */
static void __efx_sriov_push_vf_status(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_status *status = efx->vfdi_status.addr;
	struct efx_memcpy_req copy[4];
	struct efx_endpoint_page *epp;
	unsigned int pos, count;
	unsigned data_offset;
	efx_qword_t event;

	WARN_ON(!mutex_is_locked(&vf->status_lock));
	WARN_ON(!vf->status_addr);

	status->local = vf->addr;
	status->generation_end = ++status->generation_start;

	memset(copy, '\0', sizeof(copy));
	/* Write generation_start */
	copy[0].from_buf = &status->generation_start;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						     generation_start);
	copy[0].length = sizeof(status->generation_start);
	/* DMA the rest of the structure (excluding the generations). This
	 * assumes that the non-generation portion of vfdi_status is in
	 * one chunk starting at the version member.
	 */
	data_offset = offsetof(struct vfdi_status, version);
	copy[1].from_rid = efx->pci_dev->devfn;
	copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->status_addr + data_offset;
	copy[1].length = status->length - data_offset;

	/* Copy the peer pages */
	pos = 2;
	count = 0;
	list_for_each_entry(epp, &efx->local_page_list, link) {
		if (count == vf->peer_page_count) {
			/* The VF driver will know they need to provide more
			 * pages because peer_addr_count is too large.
			 */
			break;
		}
		copy[pos].from_buf = NULL;
		copy[pos].from_rid = efx->pci_dev->devfn;
		copy[pos].from_addr = epp->addr;
		copy[pos].to_rid = vf->pci_rid;
		copy[pos].to_addr = vf->peer_page_addrs[count];
		copy[pos].length = EFX_PAGE_SIZE;

		if (++pos == ARRAY_SIZE(copy)) {
			efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
			pos = 0;
		}
		++count;
	}

	/* Write generation_end */
	copy[pos].from_buf = &status->generation_end;
	copy[pos].to_rid = vf->pci_rid;
	copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						       generation_end);
	copy[pos].length = sizeof(status->generation_end);
	efx_sriov_memcpy(efx, copy, pos + 1);

	/* Notify the guest */
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
			     VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
	++vf->msg_seqno;
	efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
			   &event);
}
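
/* The generation fields turn the status page into a seqlock-style
 * snapshot: generation_start is DMAed first and generation_end last, so
 * a VF sees matching values only once the copy is complete. A minimal
 * VF-side reader sketch (hypothetical consumer, not part of this file):
 *
 *	struct vfdi_status snap;
 *	do {
 *		memcpy(&snap, status_page, sizeof(snap));
 *		rmb();
 *	} while (snap.generation_start != snap.generation_end);
 */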
static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
			   u64 *addr, unsigned count)
{
	efx_qword_t buf;
	unsigned pos;

	for (pos = 0; pos < count; ++pos) {
		EFX_POPULATE_QWORD_3(buf,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF,
				     addr ? addr[pos] >> 12 : 0,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL,
				&buf, offset + pos);
	}
}
static bool bad_vf_index(struct efx_nic *efx, unsigned index)
{
	return index >= efx_vf_size(efx);
}

static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
{
	unsigned max_buf_count = max_entry_count *
		sizeof(efx_qword_t) / EFX_BUF_SIZE;

	return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
}
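
/* Example (numbers assumed, not taken from the headers): with
 * sizeof(efx_qword_t) = 8 and EFX_BUF_SIZE = 4096, a queue of
 * max_entry_count = 4096 descriptors gives max_buf_count =
 * 4096 * 8 / 4096 = 8, so a valid buf_count is a power of two no larger
 * than 8, i.e. 1, 2, 4 or 8 pages.
 */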
/* Check that VI specified by per-port index belongs to a VF.
 * Optionally set VF index and VI index within the VF.
 */
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
			 struct efx_vf **vf_out, unsigned *rel_index_out)
{
	unsigned vf_i;

	if (abs_index < EFX_VI_BASE)
		return true;
	vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
	if (vf_i >= efx->vf_init_count)
		return true;

	if (vf_out)
		*vf_out = efx->vf + vf_i;
	if (rel_index_out)
		*rel_index_out = abs_index % efx_vf_size(efx);
	return false;
}
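
/* map_vi_index() is the inverse of abs_index(): continuing the earlier
 * hypothetical example (EFX_VI_BASE = 128, efx_vf_size() = 4), absolute
 * VI 137 maps back to VF 2, relative index 1, while anything below 128
 * is rejected as belonging to the PF.
 */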
static int efx_vfdi_init_evq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_evq = req->u.init_evq.index;
	unsigned buf_count = req->u.init_evq.buf_count;
	unsigned abs_evq = abs_index(vf, vf_evq);
	unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq);
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) ||
	    bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
				  vf->pci_name, vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(buf_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);

	if (vf_evq == 0) {
		memcpy(vf->evq0_addrs, req->u.init_evq.addr,
		       buf_count * sizeof(u64));
		vf->evq0_count = buf_count;
	}

	return VFDI_RC_SUCCESS;
}
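
/* Note on the size encoding above: buf_count is validated as a power of
 * two, so __ffs(buf_count) is log2 of the number of buffers backing the
 * ring. For instance (page and event sizes assumed, not taken from the
 * register spec), a 2048-entry EVQ of 8-byte events spans 4 pages and
 * is encoded as __ffs(4) = 2.
 */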
static int efx_vfdi_init_rxq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.init_rxq.index;
	unsigned vf_evq = req->u.init_rxq.evq;
	unsigned buf_count = req->u.init_rxq.buf_count;
	unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
	unsigned label;
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
	    vf_rxq >= VF_MAX_RX_QUEUES ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
				  "buf_count %d\n", vf->pci_name, vf_rxq,
				  vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}
	if (!__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
		++vf->rxq_count;
	efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);

	label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_RX_DESCQ_LABEL, label,
			     FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_AZ_RX_DESCQ_JUMBO,
			     !!(req->u.init_rxq.flags &
				VFDI_RXQ_FLAG_SCATTER_EN),
			     FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
			 abs_index(vf, vf_rxq));

	return VFDI_RC_SUCCESS;
}
static int efx_vfdi_init_txq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_txq = req->u.init_txq.index;
	unsigned vf_evq = req->u.init_txq.evq;
	unsigned buf_count = req->u.init_txq.buf_count;
	unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
	unsigned label, eth_filt_en;
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
	    vf_txq >= vf_max_tx_channels ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
				  "buf_count %d\n", vf->pci_name, vf_txq,
				  vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	mutex_lock(&vf->txq_lock);
	if (!__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
		++vf->txq_count;
	mutex_unlock(&vf->txq_lock);
	efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);

	eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;

	label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_8(reg,
			     FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
			     FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
			     FRF_AZ_TX_DESCQ_EN, 1,
			     FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_TX_DESCQ_LABEL, label,
			     FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_BZ_TX_NON_IP_DROP_DIS, 1);
	efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
			 abs_index(vf, vf_txq));

	return VFDI_RC_SUCCESS;
}
/* Returns true when efx_vfdi_fini_all_queues should wake */
static bool efx_vfdi_flush_wake(struct efx_vf *vf)
{
	/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
	smp_mb();

	return (!vf->txq_count && !vf->rxq_count) ||
		atomic_read(&vf->rxq_retry_count);
}

static void efx_vfdi_flush_clear(struct efx_vf *vf)
{
	memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
	vf->txq_count = 0;
	memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
	vf->rxq_count = 0;
	memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
	atomic_set(&vf->rxq_retry_count, 0);
}
static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	efx_oword_t reg;
	unsigned count = efx_vf_size(efx);
	unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
	unsigned timeout = HZ;
	unsigned index, rxqs_count;
	__le32 *rxqs;
	int rc;

	BUILD_BUG_ON(VF_MAX_RX_QUEUES >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
	if (rxqs == NULL)
		return VFDI_RC_ENOMEM;

	rtnl_lock();
	siena_prepare_flush(efx);
	rtnl_unlock();

	/* Flush all the initialized queues */
	rxqs_count = 0;
	for (index = 0; index < count; ++index) {
		if (test_bit(index, vf->txq_mask)) {
			EFX_POPULATE_OWORD_2(reg,
					     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
					     FRF_AZ_TX_FLUSH_DESCQ,
					     vf_offset + index);
			efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
		}
		if (test_bit(index, vf->rxq_mask))
			rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
	}

	atomic_set(&vf->rxq_retry_count, 0);
	while (timeout && (vf->rxq_count || vf->txq_count)) {
		rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
				  rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
		WARN_ON(rc < 0);

		timeout = wait_event_timeout(vf->flush_waitq,
					     efx_vfdi_flush_wake(vf),
					     timeout);
		rxqs_count = 0;
		for (index = 0; index < count; ++index) {
			if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
				atomic_dec(&vf->rxq_retry_count);
				rxqs[rxqs_count++] =
					cpu_to_le32(vf_offset + index);
			}
		}
	}

	rtnl_lock();
	siena_finish_flush(efx);
	rtnl_unlock();

	/* Irrespective of success/failure, fini the queues */
	EFX_ZERO_OWORD(reg);
	for (index = 0; index < count; ++index) {
		efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
				 vf_offset + index);
	}
	efx_sriov_bufs(efx, vf->buftbl_base, NULL,
		       EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
	kfree(rxqs);
	efx_vfdi_flush_clear(vf);

	vf->evq0_count = 0;

	return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}
static int efx_vfdi_insert_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.mac_filter.rxq;
	efx_filter_flags_t flags;

	if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INSERT_FILTER from %s: rxq %d "
				  "flags 0x%x\n", vf->pci_name, vf_rxq,
				  req->u.mac_filter.flags);
		return VFDI_RC_EINVAL;
	}

	flags = 0;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
		flags |= EFX_FILTER_FLAG_RX_RSS;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
		flags |= EFX_FILTER_FLAG_RX_SCATTER;
	vf->rx_filter_flags = flags;
	vf->rx_filter_qid = vf_rxq;
	vf->rx_filtering = true;

	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &efx->peer_work);

	return VFDI_RC_SUCCESS;
}
static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
{
	vf->rx_filtering = false;
	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &vf->efx->peer_work);

	return VFDI_RC_SUCCESS;
}
static int efx_vfdi_set_status_page(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	u64 page_count = req->u.set_status_page.peer_page_count;
	u64 max_page_count =
		(EFX_PAGE_SIZE -
		 offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0]))
		/ sizeof(req->u.set_status_page.peer_page_addr[0]);

	if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid SET_STATUS_PAGE from %s\n",
				  vf->pci_name);
		return VFDI_RC_EINVAL;
	}

	mutex_lock(&efx->local_lock);
	mutex_lock(&vf->status_lock);
	vf->status_addr = req->u.set_status_page.dma_addr;

	kfree(vf->peer_page_addrs);
	vf->peer_page_addrs = NULL;
	vf->peer_page_count = 0;

	if (page_count) {
		vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
					      GFP_ATOMIC);
		if (vf->peer_page_addrs) {
			memcpy(vf->peer_page_addrs,
			       req->u.set_status_page.peer_page_addr,
			       page_count * sizeof(u64));
			vf->peer_page_count = page_count;
		}
	}

	__efx_sriov_push_vf_status(vf);
	mutex_unlock(&vf->status_lock);
	mutex_unlock(&efx->local_lock);

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_clear_status_page(struct efx_vf *vf)
{
	mutex_lock(&vf->status_lock);
	vf->status_addr = 0;
	mutex_unlock(&vf->status_lock);

	return VFDI_RC_SUCCESS;
}
typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);

static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
	[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
	[VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
	[VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
	[VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
	[VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
	[VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
	[VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
	[VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
};
static void efx_sriov_vfdi(struct work_struct *work)
{
	struct efx_vf *vf = container_of(work, struct efx_vf, req);
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	struct efx_memcpy_req copy[2];
	int rc;

	/* Copy this page into the local address space */
	memset(copy, '\0', sizeof(copy));
	copy[0].from_rid = vf->pci_rid;
	copy[0].from_addr = vf->req_addr;
	copy[0].to_rid = efx->pci_dev->devfn;
	copy[0].to_addr = vf->buf.dma_addr;
	copy[0].length = EFX_PAGE_SIZE;
	rc = efx_sriov_memcpy(efx, copy, 1);
	if (rc) {
		/* If we can't get the request, we can't reply to the caller */
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Unable to fetch VFDI request from %s rc %d\n",
				  vf->pci_name, -rc);
		vf->busy = false;
		return;
	}

	if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
		rc = vfdi_ops[req->op](vf);
		if (rc == 0) {
			netif_dbg(efx, hw, efx->net_dev,
				  "vfdi request %d from %s ok\n",
				  req->op, vf->pci_name);
		}
	} else {
		netif_dbg(efx, hw, efx->net_dev,
			  "ERROR: Unrecognised request %d from VF %s addr "
			  "%llx\n", req->op, vf->pci_name,
			  (unsigned long long)vf->req_addr);
		rc = VFDI_RC_EOPNOTSUPP;
	}

	/* Allow subsequent VF requests */
	vf->busy = false;
	smp_wmb();

	/* Respond to the request */
	req->rc = rc;
	req->op = VFDI_OP_RESPONSE;

	memset(copy, '\0', sizeof(copy));
	copy[0].from_buf = &req->rc;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc);
	copy[0].length = sizeof(req->rc);
	copy[1].from_buf = &req->op;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
	copy[1].length = sizeof(req->op);

	(void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
}
/* After a reset the event queues inside the guests no longer exist. Fill the
 * event ring in guest memory with VFDI reset events, then re-initialise the
 * event queue to raise an interrupt. The guest driver will then recover.
 */
static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
{
	struct efx_nic *efx = vf->efx;
	struct efx_memcpy_req copy_req[4];
	efx_qword_t event;
	unsigned int pos, count, k, buftbl, abs_evq;
	efx_oword_t reg;
	efx_dword_t ptr;
	int rc;

	BUG_ON(buffer->len != EFX_PAGE_SIZE);

	if (!vf->evq0_count)
		return;
	BUG_ON(vf->evq0_count & (vf->evq0_count - 1));

	mutex_lock(&vf->status_lock);
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, vf->msg_seqno,
			     VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
	++vf->msg_seqno;
	for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
		memcpy(buffer->addr + pos, &event, sizeof(event));

	for (pos = 0; pos < vf->evq0_count; pos += count) {
		count = min_t(unsigned, vf->evq0_count - pos,
			      ARRAY_SIZE(copy_req));
		for (k = 0; k < count; k++) {
			copy_req[k].from_buf = NULL;
			copy_req[k].from_rid = efx->pci_dev->devfn;
			copy_req[k].from_addr = buffer->dma_addr;
			copy_req[k].to_rid = vf->pci_rid;
			copy_req[k].to_addr = vf->evq0_addrs[pos + k];
			copy_req[k].length = EFX_PAGE_SIZE;
		}
		rc = efx_sriov_memcpy(efx, copy_req, count);
		if (rc) {
			if (net_ratelimit())
				netif_err(efx, hw, efx->net_dev,
					  "ERROR: Unable to notify %s of reset"
					  ": %d\n", vf->pci_name, -rc);
			break;
		}
	}

	/* Reinitialise, arm and trigger evq0 */
	abs_evq = abs_index(vf, 0);
	buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
	efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
	EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
	efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq);
	efx_generate_event(efx, abs_evq, &event);

	mutex_unlock(&vf->status_lock);
}
static void efx_sriov_reset_vf_work(struct work_struct *work)
{
	struct efx_vf *vf = container_of(work, struct efx_vf, reset_work);
	struct efx_nic *efx = vf->efx;
	struct efx_buffer buf;

	if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
		efx_sriov_reset_vf(vf, &buf);
		efx_nic_free_buffer(efx, &buf);
	}
}
static void efx_sriov_handle_no_channel(struct efx_nic *efx)
{
	netif_err(efx, drv, efx->net_dev,
		  "ERROR: IOV requires MSI-X and 1 additional interrupt "
		  "vector. IOV disabled\n");
	efx->vf_count = 0;
}

static int efx_sriov_probe_channel(struct efx_channel *channel)
{
	channel->efx->vfdi_channel = channel;
	return 0;
}

static void
efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	snprintf(buf, len, "%s-iov", channel->efx->name);
}

static const struct efx_channel_type efx_sriov_channel_type = {
	.handle_no_channel	= efx_sriov_handle_no_channel,
	.pre_probe		= efx_sriov_probe_channel,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_sriov_get_channel_name,
	/* no copy operation; channel must not be reallocated */
	.keep_eventq		= true,
};
void efx_sriov_probe(struct efx_nic *efx)
{
	unsigned count;

	if (!max_vfs)
		return;

	if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count))
		return;
	if (count > 0 && count > max_vfs)
		count = max_vfs;

	/* efx_nic_dimension_resources() will reduce vf_count as appropriate */
	efx->vf_count = count;

	efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type;
}
/* Copy the list of individual addresses into the vfdi_status.peers
 * array and auxiliary pages, protected by %local_lock. Drop that lock
 * and then broadcast the address list to every VF.
 */
static void efx_sriov_peer_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, peer_work);
	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
	struct efx_vf *vf;
	struct efx_local_addr *local_addr;
	struct vfdi_endpoint *peer;
	struct efx_endpoint_page *epp;
	struct list_head pages;
	unsigned int peer_space;
	unsigned int peer_count;
	unsigned int pos;

	mutex_lock(&efx->local_lock);

	/* Move the existing peer pages off %local_page_list */
	INIT_LIST_HEAD(&pages);
	list_splice_tail_init(&efx->local_page_list, &pages);

	/* Populate the VF addresses starting from entry 1 (entry 0 is
	 * the PF)
	 */
	peer = vfdi_status->peers + 1;
	peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
	peer_count = 1;
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		mutex_lock(&vf->status_lock);
		if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
			*peer++ = vf->addr;
			++peer_count;
			--peer_space;
			BUG_ON(peer_space == 0);
		}
		mutex_unlock(&vf->status_lock);
	}

	/* Fill the remaining addresses */
	list_for_each_entry(local_addr, &efx->local_addr_list, link) {
		memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN);
		peer->tci = 0;
		++peer;
		++peer_count;
		if (--peer_space == 0) {
			if (list_empty(&pages)) {
				epp = kmalloc(sizeof(*epp), GFP_KERNEL);
				if (!epp)
					break;
				epp->ptr = dma_alloc_coherent(
					&efx->pci_dev->dev, EFX_PAGE_SIZE,
					&epp->addr, GFP_KERNEL);
				if (!epp->ptr) {
					kfree(epp);
					break;
				}
			} else {
				epp = list_first_entry(
					&pages, struct efx_endpoint_page, link);
				list_del(&epp->link);
			}

			list_add_tail(&epp->link, &efx->local_page_list);
			peer = (struct vfdi_endpoint *)epp->ptr;
			peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
		}
	}
	vfdi_status->peer_count = peer_count;
	mutex_unlock(&efx->local_lock);

	/* Free any now unused endpoint pages */
	while (!list_empty(&pages)) {
		epp = list_first_entry(
			&pages, struct efx_endpoint_page, link);
		list_del(&epp->link);
		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
				  epp->ptr, epp->addr);
		kfree(epp);
	}

	/* Finally, push the pages */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		mutex_lock(&vf->status_lock);
		if (vf->status_addr)
			__efx_sriov_push_vf_status(vf);
		mutex_unlock(&vf->status_lock);
	}
}
static void efx_sriov_free_local(struct efx_nic *efx)
{
	struct efx_local_addr *local_addr;
	struct efx_endpoint_page *epp;

	while (!list_empty(&efx->local_addr_list)) {
		local_addr = list_first_entry(&efx->local_addr_list,
					      struct efx_local_addr, link);
		list_del(&local_addr->link);
		kfree(local_addr);
	}

	while (!list_empty(&efx->local_page_list)) {
		epp = list_first_entry(&efx->local_page_list,
				       struct efx_endpoint_page, link);
		list_del(&epp->link);
		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
				  epp->ptr, epp->addr);
		kfree(epp);
	}
}
static int efx_sriov_vf_alloc(struct efx_nic *efx)
{
	unsigned index;
	struct efx_vf *vf;

	efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
	if (!efx->vf)
		return -ENOMEM;

	for (index = 0; index < efx->vf_count; ++index) {
		vf = efx->vf + index;

		vf->efx = efx;
		vf->index = index;
		vf->rx_filter_id = -1;
		vf->tx_filter_mode = VF_TX_FILTER_AUTO;
		vf->tx_filter_id = -1;
		INIT_WORK(&vf->req, efx_sriov_vfdi);
		INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work);
		init_waitqueue_head(&vf->flush_waitq);
		mutex_init(&vf->status_lock);
		mutex_init(&vf->txq_lock);
	}

	return 0;
}
static void efx_sriov_vfs_fini(struct efx_nic *efx)
{
	struct efx_vf *vf;
	unsigned int pos;

	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		efx_nic_free_buffer(efx, &vf->buf);
		kfree(vf->peer_page_addrs);
		vf->peer_page_addrs = NULL;
		vf->peer_page_count = 0;

		vf->evq0_count = 0;
	}
}
static int efx_sriov_vfs_init(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	unsigned index, devfn, sriov, buftbl_base;
	u16 offset, stride;
	struct efx_vf *vf;
	int rc;

	sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
	if (!sriov)
		return -ENOENT;

	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);

	buftbl_base = efx->vf_buftbl_base;
	devfn = pci_dev->devfn + offset;
	for (index = 0; index < efx->vf_count; ++index) {
		vf = efx->vf + index;

		/* Reserve buffer entries */
		vf->buftbl_base = buftbl_base;
		buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx);

		vf->pci_rid = devfn;
		snprintf(vf->pci_name, sizeof(vf->pci_name),
			 "%04x:%02x:%02x.%d",
			 pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
			 PCI_SLOT(devfn), PCI_FUNC(devfn));

		rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
		if (rc)
			goto fail;

		devfn += stride;
	}

	return 0;

fail:
	efx_sriov_vfs_fini(efx);
	return rc;
}
int efx_sriov_init(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct vfdi_status *vfdi_status;
	int rc;

	/* Ensure there's room for vf_channel */
	BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE);
	/* Ensure that VI_BASE is aligned on VI_SCALE */
	BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1));

	if (efx->vf_count == 0)
		return 0;

	rc = efx_sriov_cmd(efx, true, NULL, NULL);
	if (rc)
		goto fail_cmd;

	rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
	if (rc)
		goto fail_status;
	vfdi_status = efx->vfdi_status.addr;
	memset(vfdi_status, 0, sizeof(*vfdi_status));
	vfdi_status->version = 1;
	vfdi_status->length = sizeof(*vfdi_status);
	vfdi_status->max_tx_channels = vf_max_tx_channels;
	vfdi_status->vi_scale = efx->vi_scale;
	vfdi_status->rss_rxq_count = efx->rss_spread;
	vfdi_status->peer_count = 1 + efx->vf_count;
	vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;

	rc = efx_sriov_vf_alloc(efx);
	if (rc)
		goto fail_alloc;

	mutex_init(&efx->local_lock);
	INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
	INIT_LIST_HEAD(&efx->local_addr_list);
	INIT_LIST_HEAD(&efx->local_page_list);

	rc = efx_sriov_vfs_init(efx);
	if (rc)
		goto fail_vfs;

	rtnl_lock();
	memcpy(vfdi_status->peers[0].mac_addr,
	       net_dev->dev_addr, ETH_ALEN);
	efx->vf_init_count = efx->vf_count;
	rtnl_unlock();

	efx_sriov_usrev(efx, true);

	/* At this point we must be ready to accept VFDI requests */

	rc = pci_enable_sriov(efx->pci_dev, efx->vf_count);
	if (rc)
		goto fail_pci;

	netif_info(efx, probe, net_dev,
		   "enabled SR-IOV for %d VFs, %d VI per VF\n",
		   efx->vf_count, efx_vf_size(efx));
	return 0;

fail_pci:
	efx_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();
	efx_sriov_vfs_fini(efx);
fail_vfs:
	cancel_work_sync(&efx->peer_work);
	efx_sriov_free_local(efx);
	kfree(efx->vf);
fail_alloc:
	efx_nic_free_buffer(efx, &efx->vfdi_status);
fail_status:
	efx_sriov_cmd(efx, false, NULL, NULL);
fail_cmd:
	return rc;
}
void efx_sriov_fini(struct efx_nic *efx)
{
	struct efx_vf *vf;
	unsigned int pos;

	if (efx->vf_init_count == 0)
		return;

	/* Disable all interfaces to reconfiguration */
	BUG_ON(efx->vfdi_channel->enabled);
	efx_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();

	/* Flush all reconfiguration work */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;
		cancel_work_sync(&vf->req);
		cancel_work_sync(&vf->reset_work);
	}
	cancel_work_sync(&efx->peer_work);

	pci_disable_sriov(efx->pci_dev);

	/* Tear down back-end state */
	efx_sriov_vfs_fini(efx);
	efx_sriov_free_local(efx);
	kfree(efx->vf);
	efx_nic_free_buffer(efx, &efx->vfdi_status);
	efx_sriov_cmd(efx, false, NULL, NULL);
}
void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_vf *vf;
	unsigned qid, seq, type, data;

	qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);

	/* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */
	BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0);
	seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ);
	type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE);
	data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA);

	netif_vdbg(efx, hw, efx->net_dev,
		   "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
		   qid, seq, type, data);

	if (map_vi_index(efx, qid, &vf, NULL))
		return;
	if (vf->busy)
		goto error;

	if (type == VFDI_EV_TYPE_REQ_WORD0) {
		/* Resynchronise */
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->req_seqno = seq + 1;
		vf->req_addr = 0;
	} else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type)
		goto error;

	switch (vf->req_type) {
	case VFDI_EV_TYPE_REQ_WORD0:
	case VFDI_EV_TYPE_REQ_WORD1:
	case VFDI_EV_TYPE_REQ_WORD2:
		vf->req_addr |= (u64)data << (vf->req_type << 4);
		++vf->req_type;
		return;

	case VFDI_EV_TYPE_REQ_WORD3:
		vf->req_addr |= (u64)data << 48;
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->busy = true;
		queue_work(vfdi_workqueue, &vf->req);
		return;
	}

error:
	if (net_ratelimit())
		netif_err(efx, hw, efx->net_dev,
			  "ERROR: Screaming VFDI request from %s\n",
			  vf->pci_name);
	/* Reset the request and sequence number */
	vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
	vf->req_seqno = seq + 1;
}
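
/* Example of the address assembly above (assuming VFDI_EV_TYPE_REQ_WORD0..3
 * have the values 0..3): each event carries 16 bits of the request DMA
 * address, and (vf->req_type << 4) yields shifts of 0, 16 and 32 for
 * words 0-2, with word 3 contributing bits 48-63. An address of
 * 0x1122334455667788 therefore arrives as the word sequence 0x7788,
 * 0x5566, 0x3344, 0x1122.
 */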
void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
	struct efx_vf *vf;

	if (vf_i > efx->vf_init_count)
		return;
	vf = efx->vf + vf_i;
	netif_info(efx, hw, efx->net_dev,
		   "FLR on VF %s\n", vf->pci_name);

	vf->status_addr = 0;
	efx_vfdi_remove_all_filters(vf);
	efx_vfdi_flush_clear(vf);

	vf->evq0_count = 0;
}
void efx_sriov_mac_address_changed(struct efx_nic *efx)
{
	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;

	if (!efx->vf_init_count)
		return;
	memcpy(vfdi_status->peers[0].mac_addr,
	       efx->net_dev->dev_addr, ETH_ALEN);
	queue_work(vfdi_workqueue, &efx->peer_work);
}
void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_vf *vf;
	unsigned queue, qid;

	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (map_vi_index(efx, queue, &vf, &qid))
		return;
	/* Ignore flush completions triggered by an FLR */
	if (!test_bit(qid, vf->txq_mask))
		return;

	__clear_bit(qid, vf->txq_mask);
	--vf->txq_count;

	if (efx_vfdi_flush_wake(vf))
		wake_up(&vf->flush_waitq);
}

void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_vf *vf;
	unsigned ev_failed, queue, qid;

	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	ev_failed = EFX_QWORD_FIELD(*event,
				    FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (map_vi_index(efx, queue, &vf, &qid))
		return;
	if (!test_bit(qid, vf->rxq_mask))
		return;

	if (ev_failed) {
		set_bit(qid, vf->rxq_retry_mask);
		atomic_inc(&vf->rxq_retry_count);
	} else {
		__clear_bit(qid, vf->rxq_mask);
		--vf->rxq_count;
	}
	if (efx_vfdi_flush_wake(vf))
		wake_up(&vf->flush_waitq);
}
/* Called from napi. Schedule the reset work item */
void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
	struct efx_vf *vf;
	unsigned int rel;

	if (map_vi_index(efx, dmaq, &vf, &rel))
		return;

	if (net_ratelimit())
		netif_err(efx, hw, efx->net_dev,
			  "VF %d DMA Q %d reports descriptor fetch error.\n",
			  vf->index, rel);
	queue_work(vfdi_workqueue, &vf->reset_work);
}
void efx_sriov_reset(struct efx_nic *efx)
{
	unsigned int vf_i;
	struct efx_buffer buf;
	struct efx_vf *vf;

	ASSERT_RTNL();

	if (efx->vf_init_count == 0)
		return;

	efx_sriov_usrev(efx, true);
	(void)efx_sriov_cmd(efx, true, NULL, NULL);

	if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
		return;

	for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
		vf = efx->vf + vf_i;
		efx_sriov_reset_vf(vf, &buf);
	}

	efx_nic_free_buffer(efx, &buf);
}
int efx_init_sriov(void)
{
	/* A single threaded workqueue is sufficient. efx_sriov_vfdi() and
	 * efx_sriov_peer_work() spend almost all their time sleeping for
	 * MCDI to complete anyway
	 */
	vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
	if (!vfdi_workqueue)
		return -ENOMEM;

	return 0;
}

void efx_fini_sriov(void)
{
	destroy_workqueue(vfdi_workqueue);
}
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->status_lock);
	memcpy(vf->addr.mac_addr, mac, ETH_ALEN);
	__efx_sriov_update_vf_addr(vf);
	mutex_unlock(&vf->status_lock);

	return 0;
}
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
			  u16 vlan, u8 qos)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	u16 tci;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->status_lock);
	tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
	vf->addr.tci = htons(tci);
	__efx_sriov_update_vf_addr(vf);
	mutex_unlock(&vf->status_lock);

	return 0;
}
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
			      bool spoofchk)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	int rc;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->txq_lock);
	if (vf->txq_count == 0) {
		vf->tx_filter_mode =
			spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF;
		rc = 0;
	} else {
		/* This cannot be changed while TX queues are running */
		rc = -EBUSY;
	}
	mutex_unlock(&vf->txq_lock);
	return rc;
}
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
			    struct ifla_vf_info *ivi)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	u16 tci;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	ivi->vf = vf_i;
	memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN);
	ivi->tx_rate = 0;
	tci = ntohs(vf->addr.tci);
	ivi->vlan = tci & VLAN_VID_MASK;
	ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
	ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON;

	return 0;
}
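
/* The four functions above back the standard ndo_set_vf_mac/vlan/spoofchk
 * and ndo_get_vf_config hooks, so VF addressing is driven from userspace
 * with iproute2, e.g. (interface name hypothetical):
 *
 *	ip link set eth0 vf 0 mac 00:11:22:33:44:55
 *	ip link set eth0 vf 0 vlan 100 qos 3
 *	ip link set eth0 vf 0 spoofchk on
 */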