Commit | Line | Data |
---|---|---|
1408cc1f YM |
1 | /* QLogic qed NIC Driver |
2 | * Copyright (c) 2015 QLogic Corporation | |
3 | * | |
4 | * This software is available under the terms of the GNU General Public License | |
5 | * (GPL) Version 2, available from the file COPYING in the main directory of | |
6 | * this source tree. | |
7 | */ | |
8 | ||
36558c3d | 9 | #include <linux/crc32.h> |
1408cc1f YM |
10 | #include "qed.h" |
11 | #include "qed_sriov.h" | |
12 | #include "qed_vf.h" | |
13 | ||
14 | static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length) | |
15 | { | |
16 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
17 | void *p_tlv; | |
18 | ||
19 | /* This lock is released when we receive PF's response | |
20 | * in qed_send_msg2pf(). | |
21 | * So, qed_vf_pf_prep() and qed_send_msg2pf() | |
22 | * must come in sequence. | |
23 | */ | |
24 | mutex_lock(&(p_iov->mutex)); | |
25 | ||
26 | DP_VERBOSE(p_hwfn, | |
27 | QED_MSG_IOV, | |
28 | "preparing to send 0x%04x tlv over vf pf channel\n", | |
29 | type); | |
30 | ||
31 | /* Reset Requst offset */ | |
32 | p_iov->offset = (u8 *)p_iov->vf2pf_request; | |
33 | ||
34 | /* Clear mailbox - both request and reply */ | |
35 | memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs)); | |
36 | memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); | |
37 | ||
38 | /* Init type and length */ | |
39 | p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length); | |
40 | ||
41 | /* Init first tlv header */ | |
42 | ((struct vfpf_first_tlv *)p_tlv)->reply_address = | |
43 | (u64)p_iov->pf2vf_reply_phys; | |
44 | ||
45 | return p_tlv; | |
46 | } | |
47 | ||
48 | static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) | |
49 | { | |
50 | union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; | |
51 | struct ustorm_trigger_vf_zone trigger; | |
52 | struct ustorm_vf_zone *zone_data; | |
53 | int rc = 0, time = 100; | |
54 | ||
55 | zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; | |
56 | ||
57 | /* output tlvs list */ | |
58 | qed_dp_tlv_list(p_hwfn, p_req); | |
59 | ||
60 | /* need to add the END TLV to the message size */ | |
61 | resp_size += sizeof(struct channel_list_end_tlv); | |
62 | ||
63 | /* Send TLVs over HW channel */ | |
64 | memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone)); | |
65 | trigger.vf_pf_msg_valid = 1; | |
66 | ||
67 | DP_VERBOSE(p_hwfn, | |
68 | QED_MSG_IOV, | |
69 | "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n", | |
70 | GET_FIELD(p_hwfn->hw_info.concrete_fid, | |
71 | PXP_CONCRETE_FID_PFID), | |
72 | upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys), | |
73 | lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys), | |
74 | &zone_data->non_trigger.vf_pf_msg_addr, | |
75 | *((u32 *)&trigger), &zone_data->trigger); | |
76 | ||
77 | REG_WR(p_hwfn, | |
78 | (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo, | |
79 | lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys)); | |
80 | ||
81 | REG_WR(p_hwfn, | |
82 | (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi, | |
83 | upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys)); | |
84 | ||
85 | /* The message data must be written first, to prevent trigger before | |
86 | * data is written. | |
87 | */ | |
88 | wmb(); | |
89 | ||
90 | REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); | |
91 | ||
92 | /* When PF would be done with the response, it would write back to the | |
93 | * `done' address. Poll until then. | |
94 | */ | |
95 | while ((!*done) && time) { | |
96 | msleep(25); | |
97 | time--; | |
98 | } | |
99 | ||
100 | if (!*done) { | |
101 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
102 | "VF <-- PF Timeout [Type %d]\n", | |
103 | p_req->first_tlv.tl.type); | |
104 | rc = -EBUSY; | |
105 | goto exit; | |
106 | } else { | |
107 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
108 | "PF response: %d [Type %d]\n", | |
109 | *done, p_req->first_tlv.tl.type); | |
110 | } | |
111 | ||
112 | exit: | |
113 | mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); | |
114 | ||
115 | return rc; | |
116 | } | |
117 | ||
118 | #define VF_ACQUIRE_THRESH 3 | |
119 | #define VF_ACQUIRE_MAC_FILTERS 1 | |
120 | ||
/* Negotiate HW resources (queues, SBs, filters) with the PF via the
 * ACQUIRE message.  On NO_RESOURCE the request is "humbled" to the PF's
 * recommended amounts and retried, up to VF_ACQUIRE_THRESH attempts.
 * On success the PF's response is cached in p_iov->acquire_resp and
 * basic device info (type/rev/num, CMT) is learned from it.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	u8 rx_count = 1, tx_count = 1, num_sbs = 1;
	u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));

	/* starting filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	/* Initial (minimal) resource request */
	req->resc_request.num_rxqs = rx_count;
	req->resc_request.num_txqs = tx_count;
	req->resc_request.num_sbs = num_sbs;
	req->resc_request.num_mac_filters = num_mac;
	req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	/* Identify ourselves (OS and FW version) to the PF */
	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
		if (rc)
			return rc;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				DP_INFO(p_hwfn,
					"PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
				return -EINVAL;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "PF unwilling to fullfill resource request. Try PF recommended amount\n");

			/* humble our request */
			/* NOTE(review): the next loop iteration re-sends via
			 * qed_send_msg2pf() WITHOUT calling qed_vf_pf_prep()
			 * again, i.e. without re-taking the channel mutex
			 * that qed_send_msg2pf() just released - confirm the
			 * locking balance of this retry path.
			 */
			req->resc_request.num_txqs = resp->resc.num_txqs;
			req->resc_request.num_rxqs = resp->resc.num_rxqs;
			req->resc_request.num_sbs = resp->resc.num_sbs;
			req->resc_request.num_mac_filters =
			    resp->resc.num_mac_filters;
			req->resc_request.num_vlan_filters =
			    resp->resc.num_vlan_filters;

			/* Clear response buffer */
			memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			return -EAGAIN;
		}
	}

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	return 0;
}
229 | ||
230 | int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) | |
231 | { | |
232 | struct qed_vf_iov *p_iov; | |
233 | u32 reg; | |
234 | ||
235 | /* Set number of hwfns - might be overriden once leading hwfn learns | |
236 | * actual configuration from PF. | |
237 | */ | |
238 | if (IS_LEAD_HWFN(p_hwfn)) | |
239 | p_hwfn->cdev->num_hwfns = 1; | |
240 | ||
241 | /* Set the doorbell bar. Assumption: regview is set */ | |
242 | p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview + | |
243 | PXP_VF_BAR0_START_DQ; | |
244 | ||
245 | reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS; | |
246 | p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg); | |
247 | ||
248 | reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS; | |
249 | p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg); | |
250 | ||
251 | /* Allocate vf sriov info */ | |
252 | p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL); | |
253 | if (!p_iov) { | |
254 | DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n"); | |
255 | return -ENOMEM; | |
256 | } | |
257 | ||
258 | /* Allocate vf2pf msg */ | |
259 | p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | |
260 | sizeof(union vfpf_tlvs), | |
261 | &p_iov->vf2pf_request_phys, | |
262 | GFP_KERNEL); | |
263 | if (!p_iov->vf2pf_request) { | |
264 | DP_NOTICE(p_hwfn, | |
265 | "Failed to allocate `vf2pf_request' DMA memory\n"); | |
266 | goto free_p_iov; | |
267 | } | |
268 | ||
269 | p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | |
270 | sizeof(union pfvf_tlvs), | |
271 | &p_iov->pf2vf_reply_phys, | |
272 | GFP_KERNEL); | |
273 | if (!p_iov->pf2vf_reply) { | |
274 | DP_NOTICE(p_hwfn, | |
275 | "Failed to allocate `pf2vf_reply' DMA memory\n"); | |
276 | goto free_vf2pf_request; | |
277 | } | |
278 | ||
279 | DP_VERBOSE(p_hwfn, | |
280 | QED_MSG_IOV, | |
281 | "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n", | |
282 | p_iov->vf2pf_request, | |
283 | (u64) p_iov->vf2pf_request_phys, | |
284 | p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys); | |
285 | ||
286 | /* Allocate Bulletin board */ | |
287 | p_iov->bulletin.size = sizeof(struct qed_bulletin_content); | |
288 | p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | |
289 | p_iov->bulletin.size, | |
290 | &p_iov->bulletin.phys, | |
291 | GFP_KERNEL); | |
292 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
293 | "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", | |
294 | p_iov->bulletin.p_virt, | |
295 | (u64)p_iov->bulletin.phys, p_iov->bulletin.size); | |
296 | ||
297 | mutex_init(&p_iov->mutex); | |
298 | ||
299 | p_hwfn->vf_iov_info = p_iov; | |
300 | ||
301 | p_hwfn->hw_info.personality = QED_PCI_ETH; | |
302 | ||
303 | return qed_vf_pf_acquire(p_hwfn); | |
304 | ||
305 | free_vf2pf_request: | |
306 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
307 | sizeof(union vfpf_tlvs), | |
308 | p_iov->vf2pf_request, p_iov->vf2pf_request_phys); | |
309 | free_p_iov: | |
310 | kfree(p_iov); | |
311 | ||
312 | return -ENOMEM; | |
313 | } | |
314 | ||
dacd88d6 YM |
315 | int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, |
316 | u8 rx_qid, | |
317 | u16 sb, | |
318 | u8 sb_index, | |
319 | u16 bd_max_bytes, | |
320 | dma_addr_t bd_chain_phys_addr, | |
321 | dma_addr_t cqe_pbl_addr, | |
322 | u16 cqe_pbl_size, void __iomem **pp_prod) | |
323 | { | |
324 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
325 | struct pfvf_start_queue_resp_tlv *resp; | |
326 | struct vfpf_start_rxq_tlv *req; | |
327 | int rc; | |
328 | ||
329 | /* clear mailbox and prep first tlv */ | |
330 | req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req)); | |
331 | ||
332 | req->rx_qid = rx_qid; | |
333 | req->cqe_pbl_addr = cqe_pbl_addr; | |
334 | req->cqe_pbl_size = cqe_pbl_size; | |
335 | req->rxq_addr = bd_chain_phys_addr; | |
336 | req->hw_sb = sb; | |
337 | req->sb_index = sb_index; | |
338 | req->bd_max_bytes = bd_max_bytes; | |
339 | req->stat_id = -1; | |
340 | ||
341 | /* add list termination tlv */ | |
342 | qed_add_tlv(p_hwfn, &p_iov->offset, | |
343 | CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); | |
344 | ||
345 | resp = &p_iov->pf2vf_reply->queue_start; | |
346 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); | |
347 | if (rc) | |
348 | return rc; | |
349 | ||
350 | if (resp->hdr.status != PFVF_STATUS_SUCCESS) | |
351 | return -EINVAL; | |
352 | ||
353 | /* Learn the address of the producer from the response */ | |
354 | if (pp_prod) { | |
355 | u64 init_prod_val = 0; | |
356 | ||
357 | *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; | |
358 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
359 | "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n", | |
360 | rx_qid, *pp_prod, resp->offset); | |
361 | ||
362 | /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ | |
363 | __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), | |
364 | (u32 *)&init_prod_val); | |
365 | } | |
366 | ||
367 | return rc; | |
368 | } | |
369 | ||
370 | int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) | |
371 | { | |
372 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
373 | struct vfpf_stop_rxqs_tlv *req; | |
374 | struct pfvf_def_resp_tlv *resp; | |
375 | int rc; | |
376 | ||
377 | /* clear mailbox and prep first tlv */ | |
378 | req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); | |
379 | ||
380 | req->rx_qid = rx_qid; | |
381 | req->num_rxqs = 1; | |
382 | req->cqe_completion = cqe_completion; | |
383 | ||
384 | /* add list termination tlv */ | |
385 | qed_add_tlv(p_hwfn, &p_iov->offset, | |
386 | CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); | |
387 | ||
388 | resp = &p_iov->pf2vf_reply->default_resp; | |
389 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); | |
390 | if (rc) | |
391 | return rc; | |
392 | ||
393 | if (resp->hdr.status != PFVF_STATUS_SUCCESS) | |
394 | return -EINVAL; | |
395 | ||
396 | return rc; | |
397 | } | |
398 | ||
399 | int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, | |
400 | u16 tx_queue_id, | |
401 | u16 sb, | |
402 | u8 sb_index, | |
403 | dma_addr_t pbl_addr, | |
404 | u16 pbl_size, void __iomem **pp_doorbell) | |
405 | { | |
406 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
407 | struct vfpf_start_txq_tlv *req; | |
408 | struct pfvf_def_resp_tlv *resp; | |
409 | int rc; | |
410 | ||
411 | /* clear mailbox and prep first tlv */ | |
412 | req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req)); | |
413 | ||
414 | req->tx_qid = tx_queue_id; | |
415 | ||
416 | /* Tx */ | |
417 | req->pbl_addr = pbl_addr; | |
418 | req->pbl_size = pbl_size; | |
419 | req->hw_sb = sb; | |
420 | req->sb_index = sb_index; | |
421 | ||
422 | /* add list termination tlv */ | |
423 | qed_add_tlv(p_hwfn, &p_iov->offset, | |
424 | CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); | |
425 | ||
426 | resp = &p_iov->pf2vf_reply->default_resp; | |
427 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); | |
428 | if (rc) | |
429 | return rc; | |
430 | ||
431 | if (resp->hdr.status != PFVF_STATUS_SUCCESS) | |
432 | return -EINVAL; | |
433 | ||
434 | if (pp_doorbell) { | |
435 | u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; | |
436 | ||
437 | *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + | |
438 | qed_db_addr(cid, DQ_DEMS_LEGACY); | |
439 | } | |
440 | ||
441 | return rc; | |
442 | } | |
443 | ||
444 | int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) | |
445 | { | |
446 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
447 | struct vfpf_stop_txqs_tlv *req; | |
448 | struct pfvf_def_resp_tlv *resp; | |
449 | int rc; | |
450 | ||
451 | /* clear mailbox and prep first tlv */ | |
452 | req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); | |
453 | ||
454 | req->tx_qid = tx_qid; | |
455 | req->num_txqs = 1; | |
456 | ||
457 | /* add list termination tlv */ | |
458 | qed_add_tlv(p_hwfn, &p_iov->offset, | |
459 | CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); | |
460 | ||
461 | resp = &p_iov->pf2vf_reply->default_resp; | |
462 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); | |
463 | if (rc) | |
464 | return rc; | |
465 | ||
466 | if (resp->hdr.status != PFVF_STATUS_SUCCESS) | |
467 | return -EINVAL; | |
468 | ||
469 | return rc; | |
470 | } | |
471 | ||
472 | int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, | |
473 | u8 vport_id, | |
474 | u16 mtu, | |
475 | u8 inner_vlan_removal, | |
476 | enum qed_tpa_mode tpa_mode, | |
08feecd7 | 477 | u8 max_buffers_per_cqe, u8 only_untagged) |
dacd88d6 YM |
478 | { |
479 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
480 | struct vfpf_vport_start_tlv *req; | |
481 | struct pfvf_def_resp_tlv *resp; | |
482 | int rc, i; | |
483 | ||
484 | /* clear mailbox and prep first tlv */ | |
485 | req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req)); | |
486 | ||
487 | req->mtu = mtu; | |
488 | req->vport_id = vport_id; | |
489 | req->inner_vlan_removal = inner_vlan_removal; | |
490 | req->tpa_mode = tpa_mode; | |
491 | req->max_buffers_per_cqe = max_buffers_per_cqe; | |
08feecd7 | 492 | req->only_untagged = only_untagged; |
dacd88d6 YM |
493 | |
494 | /* status blocks */ | |
495 | for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) | |
496 | if (p_hwfn->sbs_info[i]) | |
497 | req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys; | |
498 | ||
499 | /* add list termination tlv */ | |
500 | qed_add_tlv(p_hwfn, &p_iov->offset, | |
501 | CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); | |
502 | ||
503 | resp = &p_iov->pf2vf_reply->default_resp; | |
504 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); | |
505 | if (rc) | |
506 | return rc; | |
507 | ||
508 | if (resp->hdr.status != PFVF_STATUS_SUCCESS) | |
509 | return -EINVAL; | |
510 | ||
511 | return rc; | |
512 | } | |
513 | ||
514 | int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) | |
515 | { | |
516 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
517 | struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; | |
518 | int rc; | |
519 | ||
520 | /* clear mailbox and prep first tlv */ | |
521 | qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN, | |
522 | sizeof(struct vfpf_first_tlv)); | |
523 | ||
524 | /* add list termination tlv */ | |
525 | qed_add_tlv(p_hwfn, &p_iov->offset, | |
526 | CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); | |
527 | ||
528 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); | |
529 | if (rc) | |
530 | return rc; | |
531 | ||
532 | if (resp->hdr.status != PFVF_STATUS_SUCCESS) | |
533 | return -EINVAL; | |
534 | ||
535 | return rc; | |
536 | } | |
537 | ||
538 | static bool | |
539 | qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn, | |
540 | struct qed_sp_vport_update_params *p_data, | |
541 | u16 tlv) | |
542 | { | |
543 | switch (tlv) { | |
544 | case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE: | |
545 | return !!(p_data->update_vport_active_rx_flg || | |
546 | p_data->update_vport_active_tx_flg); | |
17b235c1 YM |
547 | case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH: |
548 | return !!p_data->update_tx_switching_flg; | |
549 | case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP: | |
550 | return !!p_data->update_inner_vlan_removal_flg; | |
08feecd7 YM |
551 | case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN: |
552 | return !!p_data->update_accept_any_vlan_flg; | |
dacd88d6 YM |
553 | case CHANNEL_TLV_VPORT_UPDATE_MCAST: |
554 | return !!p_data->update_approx_mcast_flg; | |
555 | case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM: | |
556 | return !!(p_data->accept_flags.update_rx_mode_config || | |
557 | p_data->accept_flags.update_tx_mode_config); | |
558 | case CHANNEL_TLV_VPORT_UPDATE_RSS: | |
559 | return !!p_data->rss_params; | |
17b235c1 YM |
560 | case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA: |
561 | return !!p_data->sge_tpa_params; | |
dacd88d6 YM |
562 | default: |
563 | DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", | |
564 | tlv); | |
565 | return false; | |
566 | } | |
567 | } | |
568 | ||
569 | static void | |
570 | qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn, | |
571 | struct qed_sp_vport_update_params *p_data) | |
572 | { | |
573 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
574 | struct pfvf_def_resp_tlv *p_resp; | |
575 | u16 tlv; | |
576 | ||
577 | for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; | |
578 | tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) { | |
579 | if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv)) | |
580 | continue; | |
581 | ||
582 | p_resp = (struct pfvf_def_resp_tlv *) | |
583 | qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, | |
584 | tlv); | |
585 | if (p_resp && p_resp->hdr.status) | |
586 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
587 | "TLV[%d] Configuration %s\n", | |
588 | tlv, | |
589 | (p_resp && p_resp->hdr.status) ? "succeeded" | |
590 | : "failed"); | |
591 | } | |
592 | } | |
593 | ||
/* Build and send a VPORT_UPDATE message composed of extended TLVs.
 *
 * Each requested configuration (activate, mcast bins, accept-mode, RSS,
 * accept-any-vlan) is encoded as its own TLV.  The PF answers with one
 * default-response TLV per extended TLV, which is why resp_size grows by
 * sizeof(struct pfvf_def_resp_tlv) for every TLV added.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		/* Copy the approximate-multicast bin bitmap wholesale */
		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	/* update_rx/update_tx are deliberately reused below for the
	 * accept-mode flags (distinct from the activate flags above).
	 */
	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		/* Translate the individual update flags into the TLV's
		 * flag bitmask.
		 */
		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
		memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
		       sizeof(rss_params->rss_ind_table));
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	/* Log per-TLV acceptance from the PF's multi-TLV reply */
	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

	return rc;
}
730 | ||
0b55e27d YM |
731 | int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) |
732 | { | |
733 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
734 | struct pfvf_def_resp_tlv *resp; | |
735 | struct vfpf_first_tlv *req; | |
736 | int rc; | |
737 | ||
738 | /* clear mailbox and prep first tlv */ | |
739 | req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req)); | |
740 | ||
741 | /* add list termination tlv */ | |
742 | qed_add_tlv(p_hwfn, &p_iov->offset, | |
743 | CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); | |
744 | ||
745 | resp = &p_iov->pf2vf_reply->default_resp; | |
746 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); | |
747 | if (rc) | |
748 | return rc; | |
749 | ||
750 | if (resp->hdr.status != PFVF_STATUS_SUCCESS) | |
751 | return -EAGAIN; | |
752 | ||
753 | p_hwfn->b_int_enabled = 0; | |
754 | ||
755 | return 0; | |
756 | } | |
757 | ||
758 | int qed_vf_pf_release(struct qed_hwfn *p_hwfn) | |
759 | { | |
760 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
761 | struct pfvf_def_resp_tlv *resp; | |
762 | struct vfpf_first_tlv *req; | |
763 | u32 size; | |
764 | int rc; | |
765 | ||
766 | /* clear mailbox and prep first tlv */ | |
767 | req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req)); | |
768 | ||
769 | /* add list termination tlv */ | |
770 | qed_add_tlv(p_hwfn, &p_iov->offset, | |
771 | CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); | |
772 | ||
773 | resp = &p_iov->pf2vf_reply->default_resp; | |
774 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); | |
775 | ||
776 | if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS) | |
777 | rc = -EAGAIN; | |
778 | ||
779 | p_hwfn->b_int_enabled = 0; | |
780 | ||
781 | if (p_iov->vf2pf_request) | |
782 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
783 | sizeof(union vfpf_tlvs), | |
784 | p_iov->vf2pf_request, | |
785 | p_iov->vf2pf_request_phys); | |
786 | if (p_iov->pf2vf_reply) | |
787 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
788 | sizeof(union pfvf_tlvs), | |
789 | p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); | |
790 | ||
791 | if (p_iov->bulletin.p_virt) { | |
792 | size = sizeof(struct qed_bulletin_content); | |
793 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
794 | size, | |
795 | p_iov->bulletin.p_virt, p_iov->bulletin.phys); | |
796 | } | |
797 | ||
798 | kfree(p_hwfn->vf_iov_info); | |
799 | p_hwfn->vf_iov_info = NULL; | |
800 | ||
801 | return rc; | |
802 | } | |
803 | ||
dacd88d6 YM |
804 | void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, |
805 | struct qed_filter_mcast *p_filter_cmd) | |
806 | { | |
807 | struct qed_sp_vport_update_params sp_params; | |
808 | int i; | |
809 | ||
810 | memset(&sp_params, 0, sizeof(sp_params)); | |
811 | sp_params.update_approx_mcast_flg = 1; | |
812 | ||
813 | if (p_filter_cmd->opcode == QED_FILTER_ADD) { | |
814 | for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { | |
815 | u32 bit; | |
816 | ||
817 | bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); | |
818 | __set_bit(bit, sp_params.bins); | |
819 | } | |
820 | } | |
821 | ||
822 | qed_vf_pf_vport_update(p_hwfn, &sp_params); | |
823 | } | |
824 | ||
825 | int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, | |
826 | struct qed_filter_ucast *p_ucast) | |
827 | { | |
828 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
829 | struct vfpf_ucast_filter_tlv *req; | |
830 | struct pfvf_def_resp_tlv *resp; | |
831 | int rc; | |
832 | ||
833 | /* clear mailbox and prep first tlv */ | |
834 | req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); | |
835 | req->opcode = (u8) p_ucast->opcode; | |
836 | req->type = (u8) p_ucast->type; | |
837 | memcpy(req->mac, p_ucast->mac, ETH_ALEN); | |
838 | req->vlan = p_ucast->vlan; | |
839 | ||
840 | /* add list termination tlv */ | |
841 | qed_add_tlv(p_hwfn, &p_iov->offset, | |
842 | CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); | |
843 | ||
844 | resp = &p_iov->pf2vf_reply->default_resp; | |
845 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); | |
846 | if (rc) | |
847 | return rc; | |
848 | ||
849 | if (resp->hdr.status != PFVF_STATUS_SUCCESS) | |
850 | return -EAGAIN; | |
851 | ||
852 | return 0; | |
853 | } | |
854 | ||
0b55e27d YM |
855 | int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) |
856 | { | |
857 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
858 | struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; | |
859 | int rc; | |
860 | ||
861 | /* clear mailbox and prep first tlv */ | |
862 | qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP, | |
863 | sizeof(struct vfpf_first_tlv)); | |
864 | ||
865 | /* add list termination tlv */ | |
866 | qed_add_tlv(p_hwfn, &p_iov->offset, | |
867 | CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); | |
868 | ||
869 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); | |
870 | if (rc) | |
871 | return rc; | |
872 | ||
873 | if (resp->hdr.status != PFVF_STATUS_SUCCESS) | |
874 | return -EINVAL; | |
875 | ||
876 | return 0; | |
877 | } | |
878 | ||
1408cc1f YM |
879 | u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) |
880 | { | |
881 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
882 | ||
883 | if (!p_iov) { | |
884 | DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n"); | |
885 | return 0; | |
886 | } | |
887 | ||
888 | return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; | |
889 | } | |
890 | ||
36558c3d YM |
891 | int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change) |
892 | { | |
893 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; | |
894 | struct qed_bulletin_content shadow; | |
895 | u32 crc, crc_size; | |
896 | ||
897 | crc_size = sizeof(p_iov->bulletin.p_virt->crc); | |
898 | *p_change = 0; | |
899 | ||
900 | /* Need to guarantee PF is not in the middle of writing it */ | |
901 | memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size); | |
902 | ||
903 | /* If version did not update, no need to do anything */ | |
904 | if (shadow.version == p_iov->bulletin_shadow.version) | |
905 | return 0; | |
906 | ||
907 | /* Verify the bulletin we see is valid */ | |
908 | crc = crc32(0, (u8 *)&shadow + crc_size, | |
909 | p_iov->bulletin.size - crc_size); | |
910 | if (crc != shadow.crc) | |
911 | return -EAGAIN; | |
912 | ||
913 | /* Set the shadow bulletin and process it */ | |
914 | memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size); | |
915 | ||
916 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
917 | "Read a bulletin update %08x\n", shadow.version); | |
918 | ||
919 | *p_change = 1; | |
920 | ||
921 | return 0; | |
922 | } | |
923 | ||
924 | void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, | |
925 | struct qed_mcp_link_params *p_params, | |
926 | struct qed_bulletin_content *p_bulletin) | |
927 | { | |
928 | memset(p_params, 0, sizeof(*p_params)); | |
929 | ||
930 | p_params->speed.autoneg = p_bulletin->req_autoneg; | |
931 | p_params->speed.advertised_speeds = p_bulletin->req_adv_speed; | |
932 | p_params->speed.forced_speed = p_bulletin->req_forced_speed; | |
933 | p_params->pause.autoneg = p_bulletin->req_autoneg_pause; | |
934 | p_params->pause.forced_rx = p_bulletin->req_forced_rx; | |
935 | p_params->pause.forced_tx = p_bulletin->req_forced_tx; | |
936 | p_params->loopback_mode = p_bulletin->req_loopback; | |
937 | } | |
938 | ||
939 | void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, | |
940 | struct qed_mcp_link_params *params) | |
941 | { | |
942 | __qed_vf_get_link_params(p_hwfn, params, | |
943 | &(p_hwfn->vf_iov_info->bulletin_shadow)); | |
944 | } | |
945 | ||
946 | void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, | |
947 | struct qed_mcp_link_state *p_link, | |
948 | struct qed_bulletin_content *p_bulletin) | |
949 | { | |
950 | memset(p_link, 0, sizeof(*p_link)); | |
951 | ||
952 | p_link->link_up = p_bulletin->link_up; | |
953 | p_link->speed = p_bulletin->speed; | |
954 | p_link->full_duplex = p_bulletin->full_duplex; | |
955 | p_link->an = p_bulletin->autoneg; | |
956 | p_link->an_complete = p_bulletin->autoneg_complete; | |
957 | p_link->parallel_detection = p_bulletin->parallel_detection; | |
958 | p_link->pfc_enabled = p_bulletin->pfc_enabled; | |
959 | p_link->partner_adv_speed = p_bulletin->partner_adv_speed; | |
960 | p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en; | |
961 | p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en; | |
962 | p_link->partner_adv_pause = p_bulletin->partner_adv_pause; | |
963 | p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault; | |
964 | } | |
965 | ||
966 | void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, | |
967 | struct qed_mcp_link_state *link) | |
968 | { | |
969 | __qed_vf_get_link_state(p_hwfn, link, | |
970 | &(p_hwfn->vf_iov_info->bulletin_shadow)); | |
971 | } | |
972 | ||
973 | void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, | |
974 | struct qed_mcp_link_capabilities *p_link_caps, | |
975 | struct qed_bulletin_content *p_bulletin) | |
976 | { | |
977 | memset(p_link_caps, 0, sizeof(*p_link_caps)); | |
978 | p_link_caps->speed_capabilities = p_bulletin->capability_speed; | |
979 | } | |
980 | ||
981 | void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, | |
982 | struct qed_mcp_link_capabilities *p_link_caps) | |
983 | { | |
984 | __qed_vf_get_link_caps(p_hwfn, p_link_caps, | |
985 | &(p_hwfn->vf_iov_info->bulletin_shadow)); | |
986 | } | |
987 | ||
1408cc1f YM |
988 | void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) |
989 | { | |
990 | *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs; | |
991 | } | |
992 | ||
993 | void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) | |
994 | { | |
995 | memcpy(port_mac, | |
996 | p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN); | |
997 | } | |
998 | ||
999 | void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters) | |
1000 | { | |
1001 | struct qed_vf_iov *p_vf; | |
1002 | ||
1003 | p_vf = p_hwfn->vf_iov_info; | |
1004 | *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; | |
1005 | } | |
1006 | ||
1007 | void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, | |
1008 | u16 *fw_major, u16 *fw_minor, | |
1009 | u16 *fw_rev, u16 *fw_eng) | |
1010 | { | |
1011 | struct pf_vf_pfdev_info *info; | |
1012 | ||
1013 | info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info; | |
1014 | ||
1015 | *fw_major = info->fw_major; | |
1016 | *fw_minor = info->fw_minor; | |
1017 | *fw_rev = info->fw_rev; | |
1018 | *fw_eng = info->fw_eng; | |
1019 | } | |
36558c3d YM |
1020 | |
/* React to a freshly-read bulletin update from the PF. */
static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}
1026 | ||
1027 | void qed_iov_vf_task(struct work_struct *work) | |
1028 | { | |
1029 | struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, | |
1030 | iov_task.work); | |
1031 | u8 change = 0; | |
1032 | ||
1033 | if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) | |
1034 | return; | |
1035 | ||
1036 | /* Handle bulletin board changes */ | |
1037 | qed_vf_read_bulletin(hwfn, &change); | |
1038 | if (change) | |
1039 | qed_handle_bulletin_change(hwfn); | |
1040 | ||
1041 | /* As VF is polling bulletin board, need to constantly re-schedule */ | |
1042 | queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ); | |
1043 | } |