/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
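
/* 0x1edc6f41 is the CRC-32C (Castagnoli) polynomial; qed_calc_crc32c()
 * further below uses it to hash multicast MAC addresses into the
 * approximate-filtering bins programmed through the vport-update ramrod.
 */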

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0,
	       sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
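
/* A minimal caller sketch, mirroring qed_start_vport() further below: the
 * PF fills a qed_sp_vport_start_params with its FIDs and MTU, then posts
 * the VPORT_START ramrod through this helper, e.g.
 *
 *	struct qed_sp_vport_start_params start = { 0 };
 *
 *	start.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	start.concrete_fid = p_hwfn->hw_info.concrete_fid;
 *	start.vport_id = 0;
 *	start.mtu = 1500;
 *	rc = qed_sp_eth_vport_start(p_hwfn, &start);
 */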

int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}

static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_params)
{
	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
	u16 abs_l2_queue = 0, capabilities = 0;
	int rc = 0, i;

	if (!p_params) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
		     ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
	rss->update_rss_capabilities = p_params->update_rss_capabilities;
	rss->update_rss_ind_table = p_params->update_rss_ind_table;
	rss->update_rss_key = p_params->update_rss_key;

	rss->rss_mode = p_params->rss_enable ?
			ETH_VPORT_RSS_MODE_REGULAR :
			ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
	rss->tbl_size = p_params->rss_table_size_log;

	rss->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   rss->rss_mode, rss->update_rss_capabilities,
		   capabilities, rss->update_rss_ind_table,
		   rss->update_rss_key);

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		rc = qed_fw_l2_queue(p_hwfn,
				     (u8)p_params->rss_ind_table[i],
				     &abs_l2_queue);
		if (rc)
			return rc;

		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
			   i, rss->indirection_table[i]);
	}

	for (i = 0; i < 10; i++)
		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);

	return rc;
}
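
/* Note: the FW indirection table always holds ETH_RSS_IND_TABLE_ENTRIES_NUM
 * (128) entries -- hence the BUILD_BUG_ON above -- and the 40-byte RSS key
 * is programmed as ten little-endian 32-bit words.
 */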

static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}
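
/* Worked example of the Rx derivation above: with only
 * QED_ACCEPT_UCAST_MATCHED set, UCAST_DROP_ALL resolves to 0 and
 * UCAST_ACCEPT_UNMATCHED to 0 -- accept frames matching a configured
 * unicast filter, drop the rest. MCAST_ACCEPT_ALL is set only when both
 * matched and unmatched multicast are being accepted.
 */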

static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (p_params->update_approx_mcast_flg) {
		p_ramrod->common.update_approx_mcast_flg = 1;
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			u32 *p_bins = (u32 *)p_params->bins;
			__le32 val = cpu_to_le32(p_bins[i]);

			p_ramrod->approx_mcast.bins[i] = val;
		}
	}
}

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	p_cmn->update_accept_any_vlan_flg =
			p_params->update_accept_any_vlan_flg;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request()*/
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc != 0) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}

static int qed_sp_release_queue_cid(
	struct qed_hwfn *p_hwfn,
	struct qed_hw_cid_data *p_cid_data)
{
	if (!p_cid_data->b_cid_allocated)
		return 0;

	qed_cxt_release_cid(p_hwfn, p_cid_data->cid);

	p_cid_data->b_cid_allocated = false;

	return 0;
}

int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
				u16 opaque_fid,
				u32 cid,
				struct qed_queue_start_common_params *params,
				u8 stats_id,
				u16 bd_max_bytes,
				dma_addr_t bd_chain_phys_addr,
				dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_hw_cid_data *p_rx_cid;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	/* Store information for the stop */
	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
	p_rx_cid->cid = cid;
	p_rx_cid->opaque_fid = opaque_fid;
	p_rx_cid->vport_id = params->vport_id;

	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
	if (rc != 0)
		return rc;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, cid, params->queue_id, params->vport_id,
		   params->sb);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(params->sb);
	p_ramrod->sb_index = params->sb_idx;
	p_ramrod->vport_id = abs_vport_id;
	p_ramrod->stats_counter_id = stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	p_ramrod->vf_rx_prod_index = params->vf_qid;
	if (params->vf_qid)
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue is meant for VF rxq[%04x]\n", params->vf_qid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
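
/* The Rx queue is described to the FW by two DMA regions: the BD chain
 * base (bd_chain_phys_addr) that the driver posts receive buffers to, and
 * the PBL of the completion-queue chain (cqe_pbl_addr, with cqe_pbl_size
 * counting PBL pages).
 */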

static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *params,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_hw_cid_data *p_rx_cid;
	u64 init_prod_val = 0;
	u16 abs_l2_queue = 0;
	u8 abs_stats_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_rxq_start(p_hwfn,
					   params->queue_id,
					   params->sb,
					   params->sb_idx,
					   bd_max_bytes,
					   bd_chain_phys_addr,
					   cqe_pbl_addr, cqe_pbl_size, pp_prod);
	}

	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
	if (rc != 0)
		return rc;

	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
	if (rc != 0)
		return rc;

	*pp_prod = (u8 __iomem *)p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
			  (u32 *)(&init_prod_val));

	/* Allocate a CID for the queue */
	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				 &p_rx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_rx_cid->b_cid_allocated = true;

	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_rx_cid->cid,
					 params,
					 abs_stats_id,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr, cqe_pbl_size);

	if (rc != 0)
		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}

int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				u16 rx_queue_id,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_hw_cid_data *p_rx_cid;
	u16 qid, abs_rx_q_id = 0;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		qid = rx_queue_id + i;
		p_rx_cid = &p_hwfn->p_rx_cids[qid];

		/* Get SPQ entry */
		init_data.cid = p_rx_cid->cid;
		init_data.opaque_fid = p_rx_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;

		qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
		qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
		p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}

int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			     u16 rx_queue_id,
			     bool eq_completion_only, bool cqe_completion)
{
	struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_rx_cid->cid;
	init_data.opaque_fid = p_rx_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;

	qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
	qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg =
		(!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
		 !eq_completion_only) || cqe_completion;
	p_ramrod->complete_event_flg =
		!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
		eq_completion_only;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}
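
/* In other words: a queue owned by this PF (matching opaque_fid) completes
 * on the CQE unless the caller asked for EQ-only completion, while a
 * VF-owned queue always completes through the event queue so the PF can
 * observe it.
 */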

int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
				u16 opaque_fid,
				u32 cid,
				struct qed_queue_start_common_params *p_params,
				u8 stats_id,
				dma_addr_t pbl_addr,
				u16 pbl_size,
				union qed_qm_pq_params *p_pq_params)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_hw_cid_data *p_tx_cid;
	u16 pq_id, abs_tx_q_id = 0;
	int rc = -EINVAL;
	u8 abs_vport_id;

	/* Store information for the stop */
	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	p_tx_cid->cid = cid;
	p_tx_cid->opaque_fid = opaque_fid;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_params->sb);
	p_ramrod->sb_index = p_params->sb_idx;
	p_ramrod->stats_counter_id = stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	pq_id = qed_get_qm_pq(p_hwfn,
			      PROTOCOLID_ETH,
			      p_pq_params);
	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *p_params,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_hw_cid_data *p_tx_cid;
	union qed_qm_pq_params pq_params;
	u8 abs_stats_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_txq_start(p_hwfn,
					   p_params->queue_id,
					   p_params->sb,
					   p_params->sb_idx,
					   pbl_addr, pbl_size, pp_doorbell);
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
	if (rc)
		return rc;

	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	memset(p_tx_cid, 0, sizeof(*p_tx_cid));
	memset(&pq_params, 0, sizeof(pq_params));

	/* Allocate a CID for the queue */
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				 &p_tx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_tx_cid->b_cid_allocated = true;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, p_tx_cid->cid,
		   p_params->queue_id, p_params->vport_id, p_params->sb);

	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_tx_cid->cid,
					 p_params,
					 abs_stats_id,
					 pbl_addr,
					 pbl_size,
					 &pq_params);

	*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
		       qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);

	if (rc)
		qed_sp_release_queue_cid(p_hwfn, p_tx_cid);

	return rc;
}
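
/* The returned doorbell address is the queue CID's slot in the doorbell
 * BAR (DQ_DEMS_LEGACY addressing); the L2 driver writes it to advance the
 * Tx producer for this queue.
 */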

int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
{
	struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_tx_cid->cid;
	init_data.opaque_fid = p_tx_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
}

static enum eth_filter_action
qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

static void qed_set_fw_mac_addr(__le16 *fw_msb,
				__le16 *fw_mid,
				__le16 *fw_lsb,
				u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
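
/* Example: for MAC 00:11:22:33:44:55 the bytes are swapped pairwise, so
 * the __le16 words read back as fw_msb = 0x0011, fw_mid = 0x2233 and
 * fw_lsb = 0x4455 -- the layout the FW filter tables expect.
 */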

static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc != 0) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc != 0) {
		DP_ERR(p_hwfn,
		       "Unicast filter ADD command failed %d\n",
		       rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates crc32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length,
			   u32 crc32_seed,
			   u8 complement)
{
	u32 byte = 0, bit = 0;
	u8 msb = 0;
	u8 current_byte = 0;
	u32 crc32_result = crc32_seed;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}

static inline u32 qed_crc32c_le(u32 seed,
				u8 *mac,
				u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}
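
/* Approximate multicast filtering: each MAC hashes (CRC-32C over the six
 * MAC bytes zero-padded to 8) to one of 256 bins -- e.g. a crc of
 * 0x1234abcd selects bin 0xcd. The resulting bins vector is what the
 * vport-update ramrod below programs into the device.
 */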
1186 qed_sp_eth_filter_mcast(struct qed_hwfn
*p_hwfn
,
1188 struct qed_filter_mcast
*p_filter_cmd
,
1189 enum spq_mode comp_mode
,
1190 struct qed_spq_comp_cb
*p_comp_data
)
1192 unsigned long bins
[ETH_MULTICAST_MAC_BINS_IN_REGS
];
1193 struct vport_update_ramrod_data
*p_ramrod
= NULL
;
1194 struct qed_spq_entry
*p_ent
= NULL
;
1195 struct qed_sp_init_data init_data
;
1196 u8 abs_vport_id
= 0;
1199 if (p_filter_cmd
->opcode
== QED_FILTER_ADD
) {
1200 rc
= qed_fw_vport(p_hwfn
, p_filter_cmd
->vport_to_add_to
,
1205 rc
= qed_fw_vport(p_hwfn
, p_filter_cmd
->vport_to_remove_from
,
1212 memset(&init_data
, 0, sizeof(init_data
));
1213 init_data
.cid
= qed_spq_get_cid(p_hwfn
);
1214 init_data
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
1215 init_data
.comp_mode
= comp_mode
;
1216 init_data
.p_comp_data
= p_comp_data
;
1218 rc
= qed_sp_init_request(p_hwfn
, &p_ent
,
1219 ETH_RAMROD_VPORT_UPDATE
,
1220 PROTOCOLID_ETH
, &init_data
);
1222 DP_ERR(p_hwfn
, "Multi-cast command failed %d\n", rc
);
1226 p_ramrod
= &p_ent
->ramrod
.vport_update
;
1227 p_ramrod
->common
.update_approx_mcast_flg
= 1;
1229 /* explicitly clear out the entire vector */
1230 memset(&p_ramrod
->approx_mcast
.bins
, 0,
1231 sizeof(p_ramrod
->approx_mcast
.bins
));
1232 memset(bins
, 0, sizeof(unsigned long) *
1233 ETH_MULTICAST_MAC_BINS_IN_REGS
);
1234 /* filter ADD op is explicit set op and it removes
1235 * any existing filters for the vport
1237 if (p_filter_cmd
->opcode
== QED_FILTER_ADD
) {
1238 for (i
= 0; i
< p_filter_cmd
->num_mc_addrs
; i
++) {
1241 bit
= qed_mcast_bin_from_mac(p_filter_cmd
->mac
[i
]);
1242 __set_bit(bit
, bins
);
1245 /* Convert to correct endianity */
1246 for (i
= 0; i
< ETH_MULTICAST_MAC_BINS_IN_REGS
; i
++) {
1247 u32
*p_bins
= (u32
*)bins
;
1248 struct vport_update_ramrod_mcast
*approx_mcast
;
1250 approx_mcast
= &p_ramrod
->approx_mcast
;
1251 approx_mcast
->bins
[i
] = cpu_to_le32(p_bins
[i
]);
1255 p_ramrod
->common
.vport_id
= abs_vport_id
;
1257 return qed_spq_post(p_hwfn
, p_ent
, NULL
);

static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}
	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}

	return rc;
}

/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
			  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->mftag_filter_discards +=
		HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->mac_filter_discards +=
		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
			  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
			  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}

static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->packet_too_big_discard +=
		HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->tpa_coalesced_pkts +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->tpa_coalesced_events +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->tpa_coalesced_bytes +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}

static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_stats->rx_64_byte_packets += port_stats.eth.r64;
	p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
	p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
	p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
	p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
	p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
	p_stats->rx_crc_errors += port_stats.eth.rfcs;
	p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_stats->rx_pause_frames += port_stats.eth.rxpf;
	p_stats->rx_pfc_frames += port_stats.eth.rxpp;
	p_stats->rx_align_errors += port_stats.eth.raln;
	p_stats->rx_carrier_errors += port_stats.eth.rfcr;
	p_stats->rx_oversize_packets += port_stats.eth.rovr;
	p_stats->rx_jabbers += port_stats.eth.rjbr;
	p_stats->rx_undersize_packets += port_stats.eth.rund;
	p_stats->rx_fragments += port_stats.eth.rfrg;
	p_stats->tx_64_byte_packets += port_stats.eth.t64;
	p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
	p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
	p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
	p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
	p_stats->tx_pause_frames += port_stats.eth.txpf;
	p_stats->tx_pfc_frames += port_stats.eth.txpp;
	p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
	p_stats->tx_total_collisions += port_stats.eth.tncl;
	p_stats->rx_mac_bytes += port_stats.eth.rbyte;
	p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_stats->tx_mac_bytes += port_stats.eth.tbyte;
	p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
	p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
	p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
	p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
		p_stats->brb_discards += port_stats.brb.brb_discard[j];
	}
}

static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}

static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;

		if (IS_PF(cdev)) {
			/* The main vport index is relative first */
			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      IS_PF(cdev) ? true : false);

out:
		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);
	}
}

void qed_get_vport_stats(struct qed_dev *cdev,
			 struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}
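
/* Baseline subtraction sketch: if reset_stats holds rx_ucast_pkts = 100
 * from the last qed_reset_vport_stats(), a raw HW reading of 150 is
 * reported to the caller as 50. The u64-array walk relies on
 * qed_eth_stats being composed purely of u64 counters.
 */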

/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(cdev))
			qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		_qed_get_vport_stats(cdev, cdev->reset_stats);
}

static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (IS_PF(cdev)) {
		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			for_each_hwfn(cdev, i)
				info->num_queues +=
				    FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
			if (cdev->int_params.fp_msix_cnt)
				info->num_queues =
				    min_t(u8, info->num_queues,
					  cdev->int_params.fp_msix_cnt);
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);
	} else {
		qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
		if (cdev->num_hwfns > 1) {
			u8 queues = 0;

			qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
			info->num_queues += queues;
		}

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    &info->num_vlan_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		memset(info->common.hw_mac, 0, ETH_ALEN);

	return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;

	/* For VF, we start bulletin reading */
	if (IS_VF(cdev))
		qed_vf_start_iov_wq(cdev);
}

static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
	if (IS_PF(cdev))
		return true;

	return qed_vf_check_mac(&cdev->hwfns[0], mac);
}

static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
						      QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.only_untagged = true;	/* untagged only */
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		qed_hw_start_fastpath(p_hwfn);

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	if (params->clear_stats)
		qed_reset_vport_stats(cdev);

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev,
			  u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}
	return 0;
}

static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params sp_rss_params;
	int rc, i;

	if (!cdev)
		return -ENODEV;

	memset(&sp_params, 0, sizeof(sp_params));
	memset(&sp_rss_params, 0, sizeof(sp_rss_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg =
		params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg =
		params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
		params->update_accept_any_vlan_flg;

	/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
	 * We need to re-fix the rss values per engine for CMT.
	 */
	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
		struct qed_update_vport_rss_params *rss =
			&params->rss_params;
		int k, max = 0;

		/* Find largest entry, since it's possible RSS needs to
		 * be disabled [in case only 1 queue per-hwfn]
		 */
		for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
			max = (max > rss->rss_ind_table[k]) ?
				max : rss->rss_ind_table[k];

		/* Either fix RSS values or disable RSS */
		if (cdev->num_hwfns < max + 1) {
			int divisor = (max + cdev->num_hwfns - 1) /
				cdev->num_hwfns;

			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - fixing RSS values (modulo %02x)\n",
				   divisor);

			for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
				rss->rss_ind_table[k] =
					rss->rss_ind_table[k] % divisor;
		} else {
			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			params->update_rss_flg = 0;
		}
	}

	/* Now, update the RSS configuration for actual configuration */
	if (params->update_rss_flg) {
		sp_rss_params.update_rss_config = 1;
		sp_rss_params.rss_enable = 1;
		sp_rss_params.update_rss_capabilities = 1;
		sp_rss_params.update_rss_ind_table = 1;
		sp_rss_params.update_rss_key = 1;
		sp_rss_params.rss_caps = params->rss_params.rss_caps;
		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
		memcpy(sp_rss_params.rss_ind_table,
		       params->rss_params.rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
		       QED_RSS_KEY_SIZE * sizeof(u32));
	}
	sp_params.rss_params = &sp_rss_params;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

	return 0;
}
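
/* CMT example for the fix-up above: with 2 hwfns and a largest indirection
 * entry of 7 (i.e. 8 queues), divisor = (7 + 2 - 1) / 2 = 4, so every
 * entry is reduced modulo 4 into the per-engine queue range 0-3.
 */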

static int qed_start_rxq(struct qed_dev *cdev,
			 struct qed_queue_start_common_params *params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 void __iomem **pp_prod)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	/* Fix queue ID in 100g mode */
	params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_rx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       params,
				       bd_max_bytes,
				       bd_chain_phys_addr,
				       cqe_pbl_addr,
				       cqe_pbl_size,
				       pp_prod);

	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   params->queue_id, params->rss_id, params->vport_id,
		   params->sb);

	return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev,
			struct qed_stop_rxq_params *params)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
				      params->rx_queue_id / cdev->num_hwfns,
				      params->eq_completion_only,
				      false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 void __iomem **pp_doorbell)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = p_params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	/* Fix queue ID in 100g mode */
	p_params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_tx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       p_params,
				       pbl_addr,
				       pbl_size,
				       pp_doorbell);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, p_params->rss_id, p_params->vport_id,
		   p_params->sb);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	qed_hw_stop_fastpath(cdev);

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev,
			struct qed_stop_txq_params *params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_tx_queue_stop(p_hwfn,
				      params->tx_queue_id / cdev->num_hwfns);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
		return rc;
	}

	return 0;
}

static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunn_update_params tunn_info;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port == 1) {
		tunn_info.update_vxlan_udp_port = 1;
		tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port == 1) {
		tunn_info.update_geneve_udp_port = 1;
		tunn_info.geneve_udp_port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];

		rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);

		if (rc)
			return rc;
	}

	return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}
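
/* E.g. QED_FILTER_RX_MODE_TYPE_PROMISC ORs unmatched unicast and unmatched
 * multicast on top of the matched+broadcast defaults, which in
 * qed_sp_update_accept_mode() maps to both UCAST_DROP_ALL and
 * MCAST_DROP_ALL being clear in the vport rx_mode state.
 */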

static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
		return -EINVAL;
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast,
				    QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n",
			  (int)params->type);
		return -EINVAL;
	}
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id,
				 struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

#ifdef CONFIG_DCB
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
};

const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);
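
/* The qede Ethernet driver binds to this API at probe time via
 * qed_get_eth_ops() and then drives vports, queues and filters exclusively
 * through the qed_eth_ops_pass callbacks above.
 */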