/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/qed/qed_chain.h>
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
                           struct qed_sp_vport_start_params *p_params)
{
    struct vport_start_ramrod_data *p_ramrod = NULL;
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    u8 abs_vport_id = 0;
    u16 rx_mode = 0;
    int rc = -EINVAL;

    rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
    if (rc != 0)
        return rc;

    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_params->opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                             ETH_RAMROD_VPORT_START,
                             PROTOCOLID_ETH, &init_data);
    if (rc)
        return rc;

    p_ramrod = &p_ent->ramrod.vport_start;
    p_ramrod->vport_id = abs_vport_id;

    p_ramrod->mtu = cpu_to_le16(p_params->mtu);
    p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
    p_ramrod->drop_ttl0_en = p_params->drop_ttl0;

    SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
    SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

    p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

    /* TPA related fields */
    memset(&p_ramrod->tpa_param, 0,
           sizeof(struct eth_vport_tpa_param));

    p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

    switch (p_params->tpa_mode) {
    case QED_TPA_MODE_GRO:
        p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
        p_ramrod->tpa_param.tpa_max_size = (u16)-1;
        p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
        p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
        p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
        p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
        p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
        p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
        break;
    default:
        break;
    }

    /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
    p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
                                              p_params->concrete_fid);

    return qed_spq_post(p_hwfn, p_ent, NULL);
}
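
/* Note: a freshly started vport comes up with its unicast and multicast
 * rx_mode set to drop-all; traffic is only accepted once a later
 * vport-update ramrod configures the accept flags. In GRO mode the TPA
 * aggregation size limit is left at the firmware maximum ((u16)-1) and
 * the start/continue thresholds are half of the configured MTU.
 */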
int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
                       struct qed_sp_vport_start_params *p_params)
{
    if (IS_VF(p_hwfn->cdev)) {
        return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
                                     p_params->mtu,
                                     p_params->remove_inner_vlan,
                                     p_params->tpa_mode,
                                     p_params->max_buffers_per_cqe);
    }

    return qed_sp_eth_vport_start(p_hwfn, p_params);
}
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
                        struct vport_update_ramrod_data *p_ramrod,
                        struct qed_rss_params *p_params)
{
    struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
    u16 abs_l2_queue = 0, capabilities = 0;
    int rc = 0, i;

    if (!p_params) {
        p_ramrod->common.update_rss_flg = 0;
        return rc;
    }

    BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
                 ETH_RSS_IND_TABLE_ENTRIES_NUM);

    rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
    if (rc)
        return rc;

    p_ramrod->common.update_rss_flg = p_params->update_rss_config;
    rss->update_rss_capabilities = p_params->update_rss_capabilities;
    rss->update_rss_ind_table = p_params->update_rss_ind_table;
    rss->update_rss_key = p_params->update_rss_key;

    rss->rss_mode = p_params->rss_enable ?
                    ETH_VPORT_RSS_MODE_REGULAR :
                    ETH_VPORT_RSS_MODE_DISABLED;

    SET_FIELD(capabilities,
              ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
              !!(p_params->rss_caps & QED_RSS_IPV4));
    SET_FIELD(capabilities,
              ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
              !!(p_params->rss_caps & QED_RSS_IPV6));
    SET_FIELD(capabilities,
              ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
              !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
    SET_FIELD(capabilities,
              ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
              !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
    SET_FIELD(capabilities,
              ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
              !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
    SET_FIELD(capabilities,
              ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
              !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
    rss->tbl_size = p_params->rss_table_size_log;

    rss->capabilities = cpu_to_le16(capabilities);

    DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
               "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
               p_ramrod->common.update_rss_flg,
               rss->rss_mode, rss->update_rss_capabilities,
               capabilities, rss->update_rss_ind_table,
               rss->update_rss_key);

    for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
        rc = qed_fw_l2_queue(p_hwfn,
                             (u8)p_params->rss_ind_table[i],
                             &abs_l2_queue);
        if (rc)
            return rc;

        rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
        DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
                   i, rss->indirection_table[i]);
    }

    for (i = 0; i < 10; i++)
        rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);

    return rc;
}
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
                          struct vport_update_ramrod_data *p_ramrod,
                          struct qed_filter_accept_flags accept_flags)
{
    p_ramrod->common.update_rx_mode_flg =
        accept_flags.update_rx_mode_config;

    p_ramrod->common.update_tx_mode_flg =
        accept_flags.update_tx_mode_config;

    /* Set Rx mode accept flags */
    if (p_ramrod->common.update_rx_mode_flg) {
        u8 accept_filter = accept_flags.rx_accept_filter;
        u16 state = 0;

        SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
                  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
                    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

        SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
                  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

        SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
                  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
                    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

        SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
                  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
                   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

        SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
                  !!(accept_filter & QED_ACCEPT_BCAST));

        p_ramrod->rx_mode.state = cpu_to_le16(state);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "p_ramrod->rx_mode.state = 0x%x\n", state);
    }

    /* Set Tx mode accept flags */
    if (p_ramrod->common.update_tx_mode_flg) {
        u8 accept_filter = accept_flags.tx_accept_filter;
        u16 state = 0;

        SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
                  !!(accept_filter & QED_ACCEPT_NONE));

        SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
                  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
                   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

        SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
                  !!(accept_filter & QED_ACCEPT_NONE));

        SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
                  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
                   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

        SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
                  !!(accept_filter & QED_ACCEPT_BCAST));

        p_ramrod->tx_mode.state = cpu_to_le16(state);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "p_ramrod->tx_mode.state = 0x%x\n", state);
    }
}
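
/* Example of the mapping above: a fully promiscuous Rx filter
 * (QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_UCAST_UNMATCHED |
 *  QED_ACCEPT_MCAST_MATCHED | QED_ACCEPT_MCAST_UNMATCHED |
 *  QED_ACCEPT_BCAST) clears both DROP_ALL bits and sets
 * UCAST_ACCEPT_UNMATCHED, MCAST_ACCEPT_ALL and BCAST_ACCEPT_ALL in the
 * resulting rx_mode state word.
 */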
static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
                            struct vport_update_ramrod_data *p_ramrod,
                            struct qed_sge_tpa_params *p_params)
{
    struct eth_vport_tpa_param *p_tpa;

    if (!p_params) {
        p_ramrod->common.update_tpa_param_flg = 0;
        p_ramrod->common.update_tpa_en_flg = 0;
        p_ramrod->common.update_tpa_param_flg = 0;
        return;
    }

    p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
    p_tpa = &p_ramrod->tpa_param;
    p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
    p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
    p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
    p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

    p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
    p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
    p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
    p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
    p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
    p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
    p_tpa->tpa_max_size = p_params->tpa_max_size;
    p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
    p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}
static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
                        struct vport_update_ramrod_data *p_ramrod,
                        struct qed_sp_vport_update_params *p_params)
{
    int i;

    memset(&p_ramrod->approx_mcast.bins, 0,
           sizeof(p_ramrod->approx_mcast.bins));

    if (p_params->update_approx_mcast_flg) {
        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
            u32 *p_bins = (u32 *)p_params->bins;
            __le32 val = cpu_to_le32(p_bins[i]);

            p_ramrod->approx_mcast.bins[i] = val;
        }
    }
}
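
/* The approximate-multicast bins themselves are produced by
 * qed_mcast_bin_from_mac() further below (a CRC32c hash of the MAC
 * address) and are copied here into the ramrod as
 * ETH_MULTICAST_MAC_BINS_IN_REGS little-endian 32-bit words.
 */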
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
                        struct qed_sp_vport_update_params *p_params,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
{
    struct qed_rss_params *p_rss_params = p_params->rss_params;
    struct vport_update_ramrod_data_cmn *p_cmn;
    struct qed_sp_init_data init_data;
    struct vport_update_ramrod_data *p_ramrod = NULL;
    struct qed_spq_entry *p_ent = NULL;
    u8 abs_vport_id = 0, val;
    int rc = -EINVAL;

    if (IS_VF(p_hwfn->cdev)) {
        rc = qed_vf_pf_vport_update(p_hwfn, p_params);
        return rc;
    }

    rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
    if (rc != 0)
        return rc;

    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_params->opaque_fid;
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_data;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                             ETH_RAMROD_VPORT_UPDATE,
                             PROTOCOLID_ETH, &init_data);
    if (rc)
        return rc;

    /* Copy input params to ramrod according to FW struct */
    p_ramrod = &p_ent->ramrod.vport_update;
    p_cmn = &p_ramrod->common;

    p_cmn->vport_id = abs_vport_id;
    p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
    p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
    p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
    p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
    p_cmn->accept_any_vlan = p_params->accept_any_vlan;
    p_cmn->update_accept_any_vlan_flg =
        p_params->update_accept_any_vlan_flg;

    p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
    val = p_params->update_inner_vlan_removal_flg;
    p_cmn->update_inner_vlan_removal_en_flg = val;
    p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
    p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

    rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
    if (rc) {
        /* Return spq entry which is taken in qed_sp_init_request()*/
        qed_spq_return_entry(p_hwfn, p_ent);
        return rc;
    }

    /* Update mcast bins for VFs, PF doesn't use this functionality */
    qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

    qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
    qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
    return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
    struct vport_stop_ramrod_data *p_ramrod;
    struct qed_sp_init_data init_data;
    struct qed_spq_entry *p_ent;
    u8 abs_vport_id = 0;
    int rc;

    if (IS_VF(p_hwfn->cdev))
        return qed_vf_pf_vport_stop(p_hwfn);

    rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
    if (rc != 0)
        return rc;

    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                             ETH_RAMROD_VPORT_STOP,
                             PROTOCOLID_ETH, &init_data);
    if (rc)
        return rc;

    p_ramrod = &p_ent->ramrod.vport_stop;
    p_ramrod->vport_id = abs_vport_id;

    return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
                       struct qed_filter_accept_flags *p_accept_flags)
{
    struct qed_sp_vport_update_params s_params;

    memset(&s_params, 0, sizeof(s_params));
    memcpy(&s_params.accept_flags, p_accept_flags,
           sizeof(struct qed_filter_accept_flags));

    return qed_vf_pf_vport_update(p_hwfn, &s_params);
}
static int qed_filter_accept_cmd(struct qed_dev *cdev,
                                 u8 vport,
                                 struct qed_filter_accept_flags accept_flags,
                                 u8 update_accept_any_vlan,
                                 u8 accept_any_vlan,
                                 enum spq_mode comp_mode,
                                 struct qed_spq_comp_cb *p_comp_data)
{
    struct qed_sp_vport_update_params vport_update_params;
    int i, rc;

    /* Prepare and send the vport rx_mode change */
    memset(&vport_update_params, 0, sizeof(vport_update_params));
    vport_update_params.vport_id = vport;
    vport_update_params.accept_flags = accept_flags;
    vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
    vport_update_params.accept_any_vlan = accept_any_vlan;

    for_each_hwfn(cdev, i) {
        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

        vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

        if (IS_VF(cdev)) {
            rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
            if (rc)
                return rc;
            continue;
        }

        rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
                                 comp_mode, p_comp_data);
        if (rc != 0) {
            DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
            return rc;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
                   accept_flags.rx_accept_filter,
                   accept_flags.tx_accept_filter);
        if (update_accept_any_vlan)
            DP_VERBOSE(p_hwfn, QED_MSG_SP,
                       "accept_any_vlan=%d configured\n",
                       accept_any_vlan);
    }

    return 0;
}
static int qed_sp_release_queue_cid(
    struct qed_hwfn *p_hwfn,
    struct qed_hw_cid_data *p_cid_data)
{
    if (!p_cid_data->b_cid_allocated)
        return 0;

    qed_cxt_release_cid(p_hwfn, p_cid_data->cid);

    p_cid_data->b_cid_allocated = false;

    return 0;
}
int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
                                u16 opaque_fid,
                                u32 cid,
                                struct qed_queue_start_common_params *params,
                                u8 stats_id,
                                u16 bd_max_bytes,
                                dma_addr_t bd_chain_phys_addr,
                                dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
    struct rx_queue_start_ramrod_data *p_ramrod = NULL;
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    struct qed_hw_cid_data *p_rx_cid;
    u16 abs_rx_q_id = 0;
    u8 abs_vport_id = 0;
    int rc = -EINVAL;

    /* Store information for the stop */
    p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
    p_rx_cid->cid = cid;
    p_rx_cid->opaque_fid = opaque_fid;
    p_rx_cid->vport_id = params->vport_id;

    rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
    if (rc != 0)
        return rc;

    rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
    if (rc != 0)
        return rc;

    DP_VERBOSE(p_hwfn, QED_MSG_SP,
               "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
               opaque_fid, cid, params->queue_id, params->vport_id,
               params->sb);

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = cid;
    init_data.opaque_fid = opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                             ETH_RAMROD_RX_QUEUE_START,
                             PROTOCOLID_ETH, &init_data);
    if (rc)
        return rc;

    p_ramrod = &p_ent->ramrod.rx_queue_start;

    p_ramrod->sb_id = cpu_to_le16(params->sb);
    p_ramrod->sb_index = params->sb_idx;
    p_ramrod->vport_id = abs_vport_id;
    p_ramrod->stats_counter_id = stats_id;
    p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
    p_ramrod->complete_cqe_flg = 0;
    p_ramrod->complete_event_flg = 1;

    p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
    DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

    p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
    DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

    rc = qed_spq_post(p_hwfn, p_ent, NULL);

    return rc;
}
static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
                          u16 opaque_fid,
                          struct qed_queue_start_common_params *params,
                          u16 bd_max_bytes,
                          dma_addr_t bd_chain_phys_addr,
                          dma_addr_t cqe_pbl_addr,
                          u16 cqe_pbl_size, void __iomem **pp_prod)
{
    struct qed_hw_cid_data *p_rx_cid;
    u64 init_prod_val = 0;
    u16 abs_l2_queue = 0;
    u8 abs_stats_id = 0;
    int rc;

    if (IS_VF(p_hwfn->cdev)) {
        return qed_vf_pf_rxq_start(p_hwfn,
                                   params->queue_id,
                                   params->sb,
                                   params->sb_idx,
                                   bd_max_bytes,
                                   bd_chain_phys_addr,
                                   cqe_pbl_addr, cqe_pbl_size, pp_prod);
    }

    rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
    if (rc != 0)
        return rc;

    rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
    if (rc != 0)
        return rc;

    *pp_prod = (u8 __iomem *)p_hwfn->regview +
               GTT_BAR0_MAP_REG_MSDM_RAM +
               MSTORM_PRODS_OFFSET(abs_l2_queue);

    /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
    __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
                      (u32 *)(&init_prod_val));

    /* Allocate a CID for the queue */
    p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
    rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
                             &p_rx_cid->cid);
    if (rc) {
        DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
        return rc;
    }
    p_rx_cid->b_cid_allocated = true;

    rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
                                     opaque_fid,
                                     p_rx_cid->cid,
                                     params,
                                     abs_stats_id,
                                     bd_max_bytes,
                                     bd_chain_phys_addr,
                                     cqe_pbl_addr, cqe_pbl_size);

    if (rc != 0)
        qed_sp_release_queue_cid(p_hwfn, p_rx_cid);

    return rc;
}
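
/* Rx queue start sequence, in short: point the producer at its slot in
 * internal RAM and zero it, acquire a CID for the queue, then post the
 * RX_QUEUE_START ramrod. If the ramrod fails, the CID is released again
 * through qed_sp_release_queue_cid().
 */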
int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
                                u16 rx_queue_id,
                                u8 num_rxqs,
                                u8 complete_cqe_flg,
                                u8 complete_event_flg,
                                enum spq_mode comp_mode,
                                struct qed_spq_comp_cb *p_comp_data)
{
    struct rx_queue_update_ramrod_data *p_ramrod = NULL;
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    struct qed_hw_cid_data *p_rx_cid;
    u16 qid, abs_rx_q_id = 0;
    int rc = -EINVAL;
    u8 i;

    memset(&init_data, 0, sizeof(init_data));
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_data;

    for (i = 0; i < num_rxqs; i++) {
        qid = rx_queue_id + i;
        p_rx_cid = &p_hwfn->p_rx_cids[qid];

        /* Get SPQ entry */
        init_data.cid = p_rx_cid->cid;
        init_data.opaque_fid = p_rx_cid->opaque_fid;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_RX_QUEUE_UPDATE,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
            return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_update;

        qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
        qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
        p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
        p_ramrod->complete_cqe_flg = complete_cqe_flg;
        p_ramrod->complete_event_flg = complete_event_flg;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
            return rc;
    }

    return rc;
}
int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
                             u16 rx_queue_id,
                             bool eq_completion_only, bool cqe_completion)
{
    struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
    struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    u16 abs_rx_q_id = 0;
    int rc = -EINVAL;

    if (IS_VF(p_hwfn->cdev))
        return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = p_rx_cid->cid;
    init_data.opaque_fid = p_rx_cid->opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                             ETH_RAMROD_RX_QUEUE_STOP,
                             PROTOCOLID_ETH, &init_data);
    if (rc)
        return rc;

    p_ramrod = &p_ent->ramrod.rx_queue_stop;

    qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
    qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
    p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);

    /* Cleaning the queue requires the completion to arrive there.
     * In addition, VFs require the answer to come as eqe to PF.
     */
    p_ramrod->complete_cqe_flg =
        (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
         !eq_completion_only) || cqe_completion;
    p_ramrod->complete_event_flg =
        !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
        eq_completion_only;

    rc = qed_spq_post(p_hwfn, p_ent, NULL);
    if (rc)
        return rc;

    return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}
int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
                                u16 opaque_fid,
                                u32 cid,
                                struct qed_queue_start_common_params *p_params,
                                u8 stats_id,
                                dma_addr_t pbl_addr,
                                u16 pbl_size,
                                union qed_qm_pq_params *p_pq_params)
{
    struct tx_queue_start_ramrod_data *p_ramrod = NULL;
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    struct qed_hw_cid_data *p_tx_cid;
    u8 abs_vport_id;
    int rc = -EINVAL;
    u16 pq_id;

    /* Store information for the stop */
    p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
    p_tx_cid->cid = cid;
    p_tx_cid->opaque_fid = opaque_fid;

    rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
    if (rc)
        return rc;

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = cid;
    init_data.opaque_fid = opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                             ETH_RAMROD_TX_QUEUE_START,
                             PROTOCOLID_ETH, &init_data);
    if (rc)
        return rc;

    p_ramrod = &p_ent->ramrod.tx_queue_start;
    p_ramrod->vport_id = abs_vport_id;

    p_ramrod->sb_id = cpu_to_le16(p_params->sb);
    p_ramrod->sb_index = p_params->sb_idx;
    p_ramrod->stats_counter_id = stats_id;

    p_ramrod->pbl_size = cpu_to_le16(pbl_size);
    DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

    pq_id = qed_get_qm_pq(p_hwfn,
                          PROTOCOLID_ETH,
                          p_pq_params);
    p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

    return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
                          u16 opaque_fid,
                          struct qed_queue_start_common_params *p_params,
                          dma_addr_t pbl_addr,
                          u16 pbl_size, void __iomem **pp_doorbell)
{
    struct qed_hw_cid_data *p_tx_cid;
    union qed_qm_pq_params pq_params;
    u8 abs_stats_id = 0;
    int rc;

    if (IS_VF(p_hwfn->cdev)) {
        return qed_vf_pf_txq_start(p_hwfn,
                                   p_params->queue_id,
                                   p_params->sb,
                                   p_params->sb_idx,
                                   pbl_addr, pbl_size, pp_doorbell);
    }

    rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
    if (rc)
        return rc;

    p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
    memset(p_tx_cid, 0, sizeof(*p_tx_cid));
    memset(&pq_params, 0, sizeof(pq_params));

    /* Allocate a CID for the queue */
    rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
                             &p_tx_cid->cid);
    if (rc) {
        DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
        return rc;
    }
    p_tx_cid->b_cid_allocated = true;

    DP_VERBOSE(p_hwfn, QED_MSG_SP,
               "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
               opaque_fid, p_tx_cid->cid,
               p_params->queue_id, p_params->vport_id, p_params->sb);

    rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
                                     opaque_fid,
                                     p_tx_cid->cid,
                                     p_params,
                                     abs_stats_id,
                                     pbl_addr,
                                     pbl_size,
                                     &pq_params);

    *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
                   qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);

    if (rc)
        qed_sp_release_queue_cid(p_hwfn, p_tx_cid);

    return rc;
}
int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
{
    struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    int rc = -EINVAL;

    if (IS_VF(p_hwfn->cdev))
        return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = p_tx_cid->cid;
    init_data.opaque_fid = p_tx_cid->opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                             ETH_RAMROD_TX_QUEUE_STOP,
                             PROTOCOLID_ETH, &init_data);
    if (rc)
        return rc;

    rc = qed_spq_post(p_hwfn, p_ent, NULL);
    if (rc)
        return rc;

    return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
}
static enum eth_filter_action
qed_filter_action(enum qed_filter_opcode opcode)
{
    enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

    switch (opcode) {
    case QED_FILTER_ADD:
        action = ETH_FILTER_ACTION_ADD;
        break;
    case QED_FILTER_REMOVE:
        action = ETH_FILTER_ACTION_REMOVE;
        break;
    case QED_FILTER_FLUSH:
        action = ETH_FILTER_ACTION_REMOVE_ALL;
        break;
    default:
        action = MAX_ETH_FILTER_ACTION;
    }

    return action;
}
static void qed_set_fw_mac_addr(__le16 *fw_msb,
                                __le16 *fw_mid,
                                __le16 *fw_lsb,
                                u8 *mac)
{
    ((u8 *)fw_msb)[0] = mac[1];
    ((u8 *)fw_msb)[1] = mac[0];
    ((u8 *)fw_mid)[0] = mac[3];
    ((u8 *)fw_mid)[1] = mac[2];
    ((u8 *)fw_lsb)[0] = mac[5];
    ((u8 *)fw_lsb)[1] = mac[4];
}
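
/* Example: for mac = {0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff} the helper
 * above stores the bytes 0xbb,0xaa in fw_msb, 0xdd,0xcc in fw_mid and
 * 0xff,0xee in fw_lsb, i.e. each 16-bit firmware word carries a
 * byte-swapped pair of the MAC address.
 */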
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
                        u16 opaque_fid,
                        struct qed_filter_ucast *p_filter_cmd,
                        struct vport_filter_update_ramrod_data **pp_ramrod,
                        struct qed_spq_entry **pp_ent,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
{
    u8 vport_to_add_to = 0, vport_to_remove_from = 0;
    struct vport_filter_update_ramrod_data *p_ramrod;
    struct eth_filter_cmd *p_first_filter;
    struct eth_filter_cmd *p_second_filter;
    struct qed_sp_init_data init_data;
    enum eth_filter_action action;
    int rc;

    rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
                      &vport_to_remove_from);
    if (rc)
        return rc;

    rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
                      &vport_to_add_to);
    if (rc)
        return rc;

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = opaque_fid;
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_data;

    rc = qed_sp_init_request(p_hwfn, pp_ent,
                             ETH_RAMROD_FILTERS_UPDATE,
                             PROTOCOLID_ETH, &init_data);
    if (rc)
        return rc;

    *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
    p_ramrod = *pp_ramrod;
    p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
    p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

    switch (p_filter_cmd->opcode) {
    case QED_FILTER_REPLACE:
    case QED_FILTER_MOVE:
        p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
    default:
        p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
    }

    p_first_filter = &p_ramrod->filter_cmds[0];
    p_second_filter = &p_ramrod->filter_cmds[1];

    switch (p_filter_cmd->type) {
    case QED_FILTER_MAC:
        p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
    case QED_FILTER_VLAN:
        p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
    case QED_FILTER_MAC_VLAN:
        p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
    case QED_FILTER_INNER_MAC:
        p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
    case QED_FILTER_INNER_VLAN:
        p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
    case QED_FILTER_INNER_PAIR:
        p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
    case QED_FILTER_INNER_MAC_VNI_PAIR:
        p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
        break;
    case QED_FILTER_MAC_VNI_PAIR:
        p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
    case QED_FILTER_VNI:
        p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
    }

    if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
        (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
        (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
        (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
        (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
        (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
        qed_set_fw_mac_addr(&p_first_filter->mac_msb,
                            &p_first_filter->mac_mid,
                            &p_first_filter->mac_lsb,
                            (u8 *)p_filter_cmd->mac);
    }

    if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
        (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
        (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
        (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
        p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

    if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
        (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
        (p_first_filter->type == ETH_FILTER_TYPE_VNI))
        p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

    if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
        p_second_filter->type = p_first_filter->type;
        p_second_filter->mac_msb = p_first_filter->mac_msb;
        p_second_filter->mac_mid = p_first_filter->mac_mid;
        p_second_filter->mac_lsb = p_first_filter->mac_lsb;
        p_second_filter->vlan_id = p_first_filter->vlan_id;
        p_second_filter->vni = p_first_filter->vni;

        p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

        p_first_filter->vport_id = vport_to_remove_from;

        p_second_filter->action = ETH_FILTER_ACTION_ADD;
        p_second_filter->vport_id = vport_to_add_to;
    } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
        p_first_filter->vport_id = vport_to_add_to;
        memcpy(p_second_filter, p_first_filter,
               sizeof(*p_second_filter));
        p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
        p_second_filter->action = ETH_FILTER_ACTION_ADD;
    } else {
        action = qed_filter_action(p_filter_cmd->opcode);

        if (action == MAX_ETH_FILTER_ACTION) {
            DP_NOTICE(p_hwfn,
                      "%d is not supported yet\n",
                      p_filter_cmd->opcode);
            return -EINVAL;
        }

        p_first_filter->action = action;
        p_first_filter->vport_id = (p_filter_cmd->opcode ==
                                    QED_FILTER_REMOVE) ?
                                   vport_to_remove_from :
                                   vport_to_add_to;
    }

    return 0;
}

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
                            u16 opaque_fid,
                            struct qed_filter_ucast *p_filter_cmd,
                            enum spq_mode comp_mode,
                            struct qed_spq_comp_cb *p_comp_data)
{
    struct vport_filter_update_ramrod_data *p_ramrod = NULL;
    struct qed_spq_entry *p_ent = NULL;
    struct eth_filter_cmd_header *p_header;
    int rc;

    rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
                                 &p_ramrod, &p_ent,
                                 comp_mode, p_comp_data);
    if (rc != 0) {
        DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
        return rc;
    }
    p_header = &p_ramrod->filter_cmd_hdr;
    p_header->assert_on_error = p_filter_cmd->assert_on_error;

    rc = qed_spq_post(p_hwfn, p_ent, NULL);
    if (rc != 0) {
        DP_ERR(p_hwfn,
               "Unicast filter ADD command failed %d\n",
               rc);
        return rc;
    }

    DP_VERBOSE(p_hwfn, QED_MSG_SP,
               "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
               (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
               ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
                "REMOVE" :
                ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
                 "MOVE" : "REPLACE")),
               (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
               ((p_filter_cmd->type == QED_FILTER_VLAN) ?
                "VLAN" : "MAC & VLAN"),
               p_ramrod->filter_cmd_hdr.cmd_cnt,
               p_filter_cmd->is_rx_filter,
               p_filter_cmd->is_tx_filter);
    DP_VERBOSE(p_hwfn, QED_MSG_SP,
               "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
               p_filter_cmd->vport_to_add_to,
               p_filter_cmd->vport_to_remove_from,
               p_filter_cmd->mac[0],
               p_filter_cmd->mac[1],
               p_filter_cmd->mac[2],
               p_filter_cmd->mac[3],
               p_filter_cmd->mac[4],
               p_filter_cmd->mac[5],
               p_filter_cmd->vlan);

    return 0;
}

/*******************************************************************************
 * Description:
 *         Calculates crc 32 on a buffer
 *         Note: crc32_length MUST be aligned to 8
 * Return:
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
                           u32 crc32_length,
                           u32 crc32_seed,
                           u8 complement)
{
    u32 byte = 0;
    u32 bit = 0;
    u8 msb = 0;
    u8 current_byte = 0;
    u32 crc32_result = crc32_seed;

    if ((!crc32_packet) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
        return crc32_result;
    for (byte = 0; byte < crc32_length; byte++) {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit++) {
            msb = (u8)(crc32_result >> 31);
            crc32_result = crc32_result << 1;
            if (msb != (0x1 & (current_byte >> bit))) {
                crc32_result = crc32_result ^ CRC32_POLY;
                crc32_result |= 1; /*crc32_result[0] = 1;*/
            }
        }
    }
    return crc32_result;
}

static inline u32 qed_crc32c_le(u32 seed,
                                u8 *mac,
                                u32 len)
{
    u32 packet_buf[2] = { 0 };

    memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
    return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

u8 qed_mcast_bin_from_mac(u8 *mac)
{
    u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
                            mac, ETH_ALEN);

    return crc & 0xff;
}

static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
                        u16 opaque_fid,
                        struct qed_filter_mcast *p_filter_cmd,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
{
    unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
    struct vport_update_ramrod_data *p_ramrod = NULL;
    struct qed_spq_entry *p_ent = NULL;
    struct qed_sp_init_data init_data;
    u8 abs_vport_id = 0;
    int rc, i;

    if (p_filter_cmd->opcode == QED_FILTER_ADD) {
        rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
                          &abs_vport_id);
        if (rc)
            return rc;
    } else {
        rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
                          &abs_vport_id);
        if (rc)
            return rc;
    }

    /* Get SPQ entry */
    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = comp_mode;
    init_data.p_comp_data = p_comp_data;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                             ETH_RAMROD_VPORT_UPDATE,
                             PROTOCOLID_ETH, &init_data);
    if (rc) {
        DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
        return rc;
    }

    p_ramrod = &p_ent->ramrod.vport_update;
    p_ramrod->common.update_approx_mcast_flg = 1;

    /* explicitly clear out the entire vector */
    memset(&p_ramrod->approx_mcast.bins, 0,
           sizeof(p_ramrod->approx_mcast.bins));
    memset(bins, 0, sizeof(unsigned long) *
           ETH_MULTICAST_MAC_BINS_IN_REGS);
    /* filter ADD op is explicit set op and it removes
     * any existing filters for the vport
     */
    if (p_filter_cmd->opcode == QED_FILTER_ADD) {
        for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
            u32 bit;

            bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
            __set_bit(bit, bins);
        }

        /* Convert to correct endianity */
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
            u32 *p_bins = (u32 *)bins;
            struct vport_update_ramrod_mcast *approx_mcast;

            approx_mcast = &p_ramrod->approx_mcast;
            approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
        }
    }

    p_ramrod->common.vport_id = abs_vport_id;

    return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_filter_mcast_cmd(struct qed_dev *cdev,
                                struct qed_filter_mcast *p_filter_cmd,
                                enum spq_mode comp_mode,
                                struct qed_spq_comp_cb *p_comp_data)
{
    int rc = 0;
    int i;

    /* only ADD and REMOVE operations are supported for multi-cast */
    if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
         (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
        (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
        return -EINVAL;

    for_each_hwfn(cdev, i) {
        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
        u16 opaque_fid;

        if (IS_VF(cdev)) {
            qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
            continue;
        }

        opaque_fid = p_hwfn->hw_info.opaque_fid;

        rc = qed_sp_eth_filter_mcast(p_hwfn,
                                     opaque_fid,
                                     p_filter_cmd,
                                     comp_mode, p_comp_data);
    }
    return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
                                struct qed_filter_ucast *p_filter_cmd,
                                enum spq_mode comp_mode,
                                struct qed_spq_comp_cb *p_comp_data)
{
    int rc = 0;
    int i;

    for_each_hwfn(cdev, i) {
        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
        u16 opaque_fid;

        if (IS_VF(cdev)) {
            rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
            continue;
        }

        opaque_fid = p_hwfn->hw_info.opaque_fid;

        rc = qed_sp_eth_filter_ucast(p_hwfn,
                                     opaque_fid,
                                     p_filter_cmd,
                                     comp_mode, p_comp_data);
    }

    return rc;
}

/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
                                           u32 *p_addr,
                                           u32 *p_len, u16 statistics_bin)
{
    if (IS_PF(p_hwfn->cdev)) {
        *p_addr = BAR0_MAP_REG_PSDM_RAM +
                  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
        *p_len = sizeof(struct eth_pstorm_per_queue_stat);
    } else {
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

        *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
        *p_len = p_resp->pfdev_info.stats_info.pstats.len;
    }
}

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_eth_stats *p_stats,
                                   u16 statistics_bin)
{
    struct eth_pstorm_per_queue_stat pstats;
    u32 pstats_addr = 0, pstats_len = 0;

    __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
                                   statistics_bin);

    memset(&pstats, 0, sizeof(pstats));
    qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

    p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
    p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
    p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
    p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
    p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
    p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
    p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_eth_stats *p_stats,
                                   u16 statistics_bin)
{
    struct tstorm_per_port_stat tstats;
    u32 tstats_addr, tstats_len;

    if (IS_PF(p_hwfn->cdev)) {
        tstats_addr = BAR0_MAP_REG_TSDM_RAM +
                      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
        tstats_len = sizeof(struct tstorm_per_port_stat);
    } else {
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

        tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
        tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
    }

    memset(&tstats, 0, sizeof(tstats));
    qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

    p_stats->mftag_filter_discards +=
        HILO_64_REGPAIR(tstats.mftag_filter_discard);
    p_stats->mac_filter_discards +=
        HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
                                           u32 *p_addr,
                                           u32 *p_len, u16 statistics_bin)
{
    if (IS_PF(p_hwfn->cdev)) {
        *p_addr = BAR0_MAP_REG_USDM_RAM +
                  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
        *p_len = sizeof(struct eth_ustorm_per_queue_stat);
    } else {
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

        *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
        *p_len = p_resp->pfdev_info.stats_info.ustats.len;
    }
}

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_eth_stats *p_stats,
                                   u16 statistics_bin)
{
    struct eth_ustorm_per_queue_stat ustats;
    u32 ustats_addr = 0, ustats_len = 0;

    __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
                                   statistics_bin);

    memset(&ustats, 0, sizeof(ustats));
    qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

    p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
    p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
    p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
    p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
    p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
    p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
                                           u32 *p_addr,
                                           u32 *p_len, u16 statistics_bin)
{
    if (IS_PF(p_hwfn->cdev)) {
        *p_addr = BAR0_MAP_REG_MSDM_RAM +
                  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
        *p_len = sizeof(struct eth_mstorm_per_queue_stat);
    } else {
        struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
        struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

        *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
        *p_len = p_resp->pfdev_info.stats_info.mstats.len;
    }
}

static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_eth_stats *p_stats,
                                   u16 statistics_bin)
{
    struct eth_mstorm_per_queue_stat mstats;
    u32 mstats_addr = 0, mstats_len = 0;

    __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
                                   statistics_bin);

    memset(&mstats, 0, sizeof(mstats));
    qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

    p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
    p_stats->packet_too_big_discard +=
        HILO_64_REGPAIR(mstats.packet_too_big_discard);
    p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
    p_stats->tpa_coalesced_pkts +=
        HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
    p_stats->tpa_coalesced_events +=
        HILO_64_REGPAIR(mstats.tpa_coalesced_events);
    p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
    p_stats->tpa_coalesced_bytes +=
        HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}

static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt,
                                       struct qed_eth_stats *p_stats)
{
    struct port_stats port_stats;
    int j;

    memset(&port_stats, 0, sizeof(port_stats));

    qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
                    p_hwfn->mcp_info->port_addr +
                    offsetof(struct public_port, stats),
                    sizeof(port_stats));

    p_stats->rx_64_byte_packets += port_stats.pmm.r64;
    p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
    p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
    p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
    p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
    p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
    p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
    p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
    p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
    p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
    p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
    p_stats->rx_crc_errors += port_stats.pmm.rfcs;
    p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
    p_stats->rx_pause_frames += port_stats.pmm.rxpf;
    p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
    p_stats->rx_align_errors += port_stats.pmm.raln;
    p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
    p_stats->rx_oversize_packets += port_stats.pmm.rovr;
    p_stats->rx_jabbers += port_stats.pmm.rjbr;
    p_stats->rx_undersize_packets += port_stats.pmm.rund;
    p_stats->rx_fragments += port_stats.pmm.rfrg;
    p_stats->tx_64_byte_packets += port_stats.pmm.t64;
    p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
    p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
    p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
    p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
    p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
    p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
    p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
    p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
    p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
    p_stats->tx_pause_frames += port_stats.pmm.txpf;
    p_stats->tx_pfc_frames += port_stats.pmm.txpp;
    p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
    p_stats->tx_total_collisions += port_stats.pmm.tncl;
    p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
    p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
    p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
    p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
    p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
    p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
    p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
    p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
    p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
    p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
    for (j = 0; j < 8; j++) {
        p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
        p_stats->brb_discards += port_stats.brb.brb_discard[j];
    }
}

static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_eth_stats *stats,
                                  u16 statistics_bin, bool b_get_port_stats)
{
    __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
    __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
    __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
    __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

    if (b_get_port_stats && p_hwfn->mcp_info)
        __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}

static void _qed_get_vport_stats(struct qed_dev *cdev,
                                 struct qed_eth_stats *stats)
{
    u8 fw_vport = 0;
    int i;

    memset(stats, 0, sizeof(*stats));

    for_each_hwfn(cdev, i) {
        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
        struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
                                            : NULL;

        if (IS_PF(cdev)) {
            /* The main vport index is relative first */
            if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
                DP_ERR(p_hwfn, "No vport available!\n");
                goto out;
            }
        }

        if (IS_PF(cdev) && !p_ptt) {
            DP_ERR(p_hwfn, "Failed to acquire ptt\n");
            continue;
        }

        __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
                              IS_PF(cdev) ? true : false);

out:
        if (IS_PF(cdev) && p_ptt)
            qed_ptt_release(p_hwfn, p_ptt);
    }
}

void qed_get_vport_stats(struct qed_dev *cdev,
                         struct qed_eth_stats *stats)
{
    u32 i;

    if (!cdev) {
        memset(stats, 0, sizeof(*stats));
        return;
    }

    _qed_get_vport_stats(cdev, stats);

    if (!cdev->reset_stats)
        return;

    /* Reduce the statistics baseline */
    for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
        ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}

/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
    int i;

    for_each_hwfn(cdev, i) {
        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
        struct eth_mstorm_per_queue_stat mstats;
        struct eth_ustorm_per_queue_stat ustats;
        struct eth_pstorm_per_queue_stat pstats;
        struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
                                            : NULL;
        u32 addr = 0, len = 0;

        if (IS_PF(cdev) && !p_ptt) {
            DP_ERR(p_hwfn, "Failed to acquire ptt\n");
            continue;
        }

        memset(&mstats, 0, sizeof(mstats));
        __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
        qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

        memset(&ustats, 0, sizeof(ustats));
        __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
        qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

        memset(&pstats, 0, sizeof(pstats));
        __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
        qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

        if (IS_PF(cdev))
            qed_ptt_release(p_hwfn, p_ptt);
    }

    /* PORT statistics are not necessarily reset, so we need to
     * read and create a baseline for future statistics.
     */
    if (!cdev->reset_stats)
        DP_INFO(cdev, "Reset stats not allocated\n");
    else
        _qed_get_vport_stats(cdev, cdev->reset_stats);
}

static int qed_fill_eth_dev_info(struct qed_dev *cdev,
                                 struct qed_dev_eth_info *info)
{
    int i;

    memset(info, 0, sizeof(*info));

    info->num_tc = 1;

    if (IS_PF(cdev)) {
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
            for_each_hwfn(cdev, i)
                info->num_queues +=
                    FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
            if (cdev->int_params.fp_msix_cnt)
                info->num_queues =
                    min_t(u8, info->num_queues,
                          cdev->int_params.fp_msix_cnt);
        } else {
            info->num_queues = cdev->num_hwfns;
        }

        info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
        ether_addr_copy(info->port_mac,
                        cdev->hwfns[0].hw_info.hw_mac_addr);
    } else {
        qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
        if (cdev->num_hwfns > 1) {
            u8 queues = 0;

            qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
            info->num_queues += queues;
        }

        qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
                                    &info->num_vlan_filters);
        qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
    }

    qed_fill_dev_info(cdev, &info->common);

    if (IS_VF(cdev))
        memset(info->common.hw_mac, 0, ETH_ALEN);

    return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
                                 struct qed_eth_cb_ops *ops, void *cookie)
{
    cdev->protocol_ops.eth = ops;
    cdev->ops_cookie = cookie;

    /* For VF, we start bulletin reading */
    if (IS_VF(cdev))
        qed_vf_start_iov_wq(cdev);
}

static int qed_start_vport(struct qed_dev *cdev,
                           struct qed_start_vport_params *params)
{
    int rc, i;

    for_each_hwfn(cdev, i) {
        struct qed_sp_vport_start_params start = { 0 };
        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

        start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
                                              QED_TPA_MODE_NONE;
        start.remove_inner_vlan = params->remove_inner_vlan;
        start.drop_ttl0 = params->drop_ttl0;
        start.opaque_fid = p_hwfn->hw_info.opaque_fid;
        start.concrete_fid = p_hwfn->hw_info.concrete_fid;
        start.vport_id = params->vport_id;
        start.max_buffers_per_cqe = 16;
        start.mtu = params->mtu;

        rc = qed_sp_vport_start(p_hwfn, &start);
        if (rc) {
            DP_ERR(cdev, "Failed to start VPORT\n");
            return rc;
        }

        qed_hw_start_fastpath(p_hwfn);

        DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
                   "Started V-PORT %d with MTU %d\n",
                   start.vport_id, start.mtu);
    }

    qed_reset_vport_stats(cdev);

    return 0;
}

static int qed_stop_vport(struct qed_dev *cdev,
                          u8 vport_id)
{
    int rc, i;

    for_each_hwfn(cdev, i) {
        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

        rc = qed_sp_vport_stop(p_hwfn,
                               p_hwfn->hw_info.opaque_fid,
                               vport_id);

        if (rc) {
            DP_ERR(cdev, "Failed to stop VPORT\n");
            return rc;
        }
    }
    return 0;
}

static int qed_update_vport(struct qed_dev *cdev,
                            struct qed_update_vport_params *params)
{
    struct qed_sp_vport_update_params sp_params;
    struct qed_rss_params sp_rss_params;
    int rc, i;

    if (!cdev)
        return -ENODEV;

    memset(&sp_params, 0, sizeof(sp_params));
    memset(&sp_rss_params, 0, sizeof(sp_rss_params));

    /* Translate protocol params into sp params */
    sp_params.vport_id = params->vport_id;
    sp_params.update_vport_active_rx_flg =
        params->update_vport_active_flg;
    sp_params.update_vport_active_tx_flg =
        params->update_vport_active_flg;
    sp_params.vport_active_rx_flg = params->vport_active_flg;
    sp_params.vport_active_tx_flg = params->vport_active_flg;
    sp_params.accept_any_vlan = params->accept_any_vlan;
    sp_params.update_accept_any_vlan_flg =
        params->update_accept_any_vlan_flg;

    /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
     * We need to re-fix the rss values per engine for CMT.
     */
    if (cdev->num_hwfns > 1 && params->update_rss_flg) {
        struct qed_update_vport_rss_params *rss =
            &params->rss_params;
        int k, max = 0;

        /* Find largest entry, since it's possible RSS needs to
         * be disabled [in case only 1 queue per-hwfn]
         */
        for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
            max = (max > rss->rss_ind_table[k]) ?
                  max : rss->rss_ind_table[k];

        /* Either fix RSS values or disable RSS */
        if (cdev->num_hwfns < max + 1) {
            int divisor = (max + cdev->num_hwfns - 1) /
                          cdev->num_hwfns;

            DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
                       "CMT - fixing RSS values (modulo %02x)\n",
                       divisor);

            for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
                rss->rss_ind_table[k] =
                    rss->rss_ind_table[k] % divisor;
        } else {
            DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
                       "CMT - 1 queue per-hwfn; Disabling RSS\n");
            params->update_rss_flg = 0;
        }
    }

    /* Now, update the RSS configuration for actual configuration */
    if (params->update_rss_flg) {
        sp_rss_params.update_rss_config = 1;
        sp_rss_params.rss_enable = 1;
        sp_rss_params.update_rss_capabilities = 1;
        sp_rss_params.update_rss_ind_table = 1;
        sp_rss_params.update_rss_key = 1;
        sp_rss_params.rss_caps = params->rss_params.rss_caps;
        sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
        memcpy(sp_rss_params.rss_ind_table,
               params->rss_params.rss_ind_table,
               QED_RSS_IND_TABLE_SIZE * sizeof(u16));
        memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
               QED_RSS_KEY_SIZE * sizeof(u32));
    }
    sp_params.rss_params = &sp_rss_params;

    for_each_hwfn(cdev, i) {
        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

        sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
        rc = qed_sp_vport_update(p_hwfn, &sp_params,
                                 QED_SPQ_MODE_EBLOCK,
                                 NULL);
        if (rc) {
            DP_ERR(cdev, "Failed to update VPORT\n");
            return rc;
        }

        DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
                   "Updated V-PORT %d: active_flag %d [update %d]\n",
                   params->vport_id, params->vport_active_flg,
                   params->update_vport_active_flg);
    }

    return 0;
}

/* Illustrative note for the CMT fix-up above: with two hwfns and an
 * indirection table whose largest entry is 7, the divisor becomes
 * (7 + 2 - 1) / 2 = 4, so entries 0..7 are remapped modulo 4 into the
 * 0..3 range each engine can actually serve.
 */

static int qed_start_rxq(struct qed_dev *cdev,
                         struct qed_queue_start_common_params *params,
                         u16 bd_max_bytes,
                         dma_addr_t bd_chain_phys_addr,
                         dma_addr_t cqe_pbl_addr,
                         u16 cqe_pbl_size,
                         void __iomem **pp_prod)
{
    int rc, hwfn_index;
    struct qed_hwfn *p_hwfn;

    hwfn_index = params->rss_id % cdev->num_hwfns;
    p_hwfn = &cdev->hwfns[hwfn_index];

    /* Fix queue ID in 100g mode */
    params->queue_id /= cdev->num_hwfns;

    rc = qed_sp_eth_rx_queue_start(p_hwfn,
                                   p_hwfn->hw_info.opaque_fid,
                                   params,
                                   bd_max_bytes,
                                   bd_chain_phys_addr,
                                   cqe_pbl_addr,
                                   cqe_pbl_size,
                                   pp_prod);

    if (rc) {
        DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
        return rc;
    }

    DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
               "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
               params->queue_id, params->rss_id, params->vport_id,
               params->sb);

    return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev,
                        struct qed_stop_rxq_params *params)
{
    int rc, hwfn_index;
    struct qed_hwfn *p_hwfn;

    hwfn_index = params->rss_id % cdev->num_hwfns;
    p_hwfn = &cdev->hwfns[hwfn_index];

    rc = qed_sp_eth_rx_queue_stop(p_hwfn,
                                  params->rx_queue_id / cdev->num_hwfns,
                                  params->eq_completion_only,
                                  false);
    if (rc) {
        DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
        return rc;
    }

    return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
                         struct qed_queue_start_common_params *p_params,
                         dma_addr_t pbl_addr,
                         u16 pbl_size,
                         void __iomem **pp_doorbell)
{
    struct qed_hwfn *p_hwfn;
    int rc, hwfn_index;

    hwfn_index = p_params->rss_id % cdev->num_hwfns;
    p_hwfn = &cdev->hwfns[hwfn_index];

    /* Fix queue ID in 100g mode */
    p_params->queue_id /= cdev->num_hwfns;

    rc = qed_sp_eth_tx_queue_start(p_hwfn,
                                   p_hwfn->hw_info.opaque_fid,
                                   p_params,
                                   pbl_addr,
                                   pbl_size,
                                   pp_doorbell);

    if (rc) {
        DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
        return rc;
    }

    DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
               "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
               p_params->queue_id, p_params->rss_id, p_params->vport_id,
               p_params->sb);

    return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
    qed_hw_stop_fastpath(cdev);

    return 0;
}

static int qed_stop_txq(struct qed_dev *cdev,
                        struct qed_stop_txq_params *params)
{
    struct qed_hwfn *p_hwfn;
    int rc, hwfn_index;

    hwfn_index = params->rss_id % cdev->num_hwfns;
    p_hwfn = &cdev->hwfns[hwfn_index];

    rc = qed_sp_eth_tx_queue_stop(p_hwfn,
                                  params->tx_queue_id / cdev->num_hwfns);
    if (rc) {
        DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
        return rc;
    }

    return 0;
}

static int qed_tunn_configure(struct qed_dev *cdev,
                              struct qed_tunn_params *tunn_params)
{
    struct qed_tunn_update_params tunn_info;
    int i, rc;

    if (IS_VF(cdev))
        return 0;

    memset(&tunn_info, 0, sizeof(tunn_info));
    if (tunn_params->update_vxlan_port == 1) {
        tunn_info.update_vxlan_udp_port = 1;
        tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
    }

    if (tunn_params->update_geneve_port == 1) {
        tunn_info.update_geneve_udp_port = 1;
        tunn_info.geneve_udp_port = tunn_params->geneve_port;
    }

    for_each_hwfn(cdev, i) {
        struct qed_hwfn *hwfn = &cdev->hwfns[i];

        rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
                                       QED_SPQ_MODE_EBLOCK, NULL);

        if (rc)
            return rc;
    }

    return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
                                        enum qed_filter_rx_mode_type type)
{
    struct qed_filter_accept_flags accept_flags;

    memset(&accept_flags, 0, sizeof(accept_flags));

    accept_flags.update_rx_mode_config = 1;
    accept_flags.update_tx_mode_config = 1;
    accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
                                    QED_ACCEPT_MCAST_MATCHED |
                                    QED_ACCEPT_BCAST;
    accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
                                    QED_ACCEPT_MCAST_MATCHED |
                                    QED_ACCEPT_BCAST;

    if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
        accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
                                         QED_ACCEPT_MCAST_UNMATCHED;
    else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
        accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;

    return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
                                 QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_ucast(struct qed_dev *cdev,
                                      struct qed_filter_ucast_params *params)
{
    struct qed_filter_ucast ucast;

    if (!params->vlan_valid && !params->mac_valid) {
        DP_NOTICE(cdev,
                  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
        return -EINVAL;
    }

    memset(&ucast, 0, sizeof(ucast));
    switch (params->type) {
    case QED_FILTER_XCAST_TYPE_ADD:
        ucast.opcode = QED_FILTER_ADD;
        break;
    case QED_FILTER_XCAST_TYPE_DEL:
        ucast.opcode = QED_FILTER_REMOVE;
        break;
    case QED_FILTER_XCAST_TYPE_REPLACE:
        ucast.opcode = QED_FILTER_REPLACE;
        break;
    default:
        DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
                  params->type);
    }

    if (params->vlan_valid && params->mac_valid) {
        ucast.type = QED_FILTER_MAC_VLAN;
        ether_addr_copy(ucast.mac, params->mac);
        ucast.vlan = params->vlan;
    } else if (params->mac_valid) {
        ucast.type = QED_FILTER_MAC;
        ether_addr_copy(ucast.mac, params->mac);
    } else {
        ucast.type = QED_FILTER_VLAN;
        ucast.vlan = params->vlan;
    }

    ucast.is_rx_filter = true;
    ucast.is_tx_filter = true;

    return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
                                      struct qed_filter_mcast_params *params)
{
    struct qed_filter_mcast mcast;
    int i;

    memset(&mcast, 0, sizeof(mcast));
    switch (params->type) {
    case QED_FILTER_XCAST_TYPE_ADD:
        mcast.opcode = QED_FILTER_ADD;
        break;
    case QED_FILTER_XCAST_TYPE_DEL:
        mcast.opcode = QED_FILTER_REMOVE;
        break;
    default:
        DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
                  params->type);
    }

    mcast.num_mc_addrs = params->num;
    for (i = 0; i < mcast.num_mc_addrs; i++)
        ether_addr_copy(mcast.mac[i], params->mac[i]);

    return qed_filter_mcast_cmd(cdev, &mcast,
                                QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
                                struct qed_filter_params *params)
{
    enum qed_filter_rx_mode_type accept_flags;

    switch (params->type) {
    case QED_FILTER_TYPE_UCAST:
        return qed_configure_filter_ucast(cdev, &params->filter.ucast);
    case QED_FILTER_TYPE_MCAST:
        return qed_configure_filter_mcast(cdev, &params->filter.mcast);
    case QED_FILTER_TYPE_RX_MODE:
        accept_flags = params->filter.accept_flags;
        return qed_configure_filter_rx_mode(cdev, accept_flags);
    default:
        DP_NOTICE(cdev, "Unknown filter type %d\n",
                  (int)params->type);
        return -EINVAL;
    }
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
                                 u8 rss_id,
                                 struct eth_slow_path_rx_cqe *cqe)
{
    return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
                                  cqe);
}

#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

static const struct qed_eth_ops qed_eth_ops_pass = {
    .common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
    .iov = &qed_iov_ops_pass,
#endif
    .fill_dev_info = &qed_fill_eth_dev_info,
    .register_ops = &qed_register_eth_ops,
    .vport_start = &qed_start_vport,
    .vport_stop = &qed_stop_vport,
    .vport_update = &qed_update_vport,
    .q_rx_start = &qed_start_rxq,
    .q_rx_stop = &qed_stop_rxq,
    .q_tx_start = &qed_start_txq,
    .q_tx_stop = &qed_stop_txq,
    .filter_config = &qed_configure_filter,
    .fastpath_stop = &qed_fastpath_stop,
    .eth_cqe_completion = &qed_fp_cqe_completion,
    .get_vport_stats = &qed_get_vport_stats,
    .tunn_config = &qed_tunn_configure,
};

const struct qed_eth_ops *qed_get_eth_ops(void)
{
    return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
    /* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);