Commit | Line | Data |
---|---|---|
511e6bc0 | 1 | /* |
2 | * Copyright (c) 2014-2015 Hisilicon Limited. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | */ | |
9 | ||
10 | #include <linux/etherdevice.h> | |
11 | #include <linux/netdevice.h> | |
12 | #include <linux/spinlock.h> | |
13 | ||
14 | #include "hnae.h" | |
15 | #include "hns_dsaf_mac.h" | |
16 | #include "hns_dsaf_main.h" | |
17 | #include "hns_dsaf_ppe.h" | |
18 | #include "hns_dsaf_rcb.h" | |
19 | ||
20 | #define AE_NAME_PORT_ID_IDX 6 | |
21 | #define ETH_STATIC_REG 1 | |
22 | #define ETH_DUMP_REG 5 | |
23 | #define ETH_GSTRING_LEN 32 | |
24 | ||
25 | static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle) | |
26 | { | |
27 | struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); | |
28 | ||
29 | return vf_cb->mac_cb; | |
30 | } | |
31 | ||
32 | /** | |
33 | * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id | |
34 | * @port_id: enet port id | |
35 | *: debug port 0-1, service port 2 -7 (dsaf mode only 2) | |
36 | * return: dsaf port id | |
37 | *: service ports 0 - 5, debug port 6-7 | |
38 | **/ | |
39 | static int hns_ae_map_eport_to_dport(u32 port_id) | |
40 | { | |
41 | int port_index; | |
42 | ||
43 | if (port_id < DSAF_DEBUG_NW_NUM) | |
44 | port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF; | |
45 | else | |
46 | port_index = port_id - DSAF_DEBUG_NW_NUM; | |
47 | ||
48 | return port_index; | |
49 | } | |
50 | ||
51 | static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev) | |
52 | { | |
53 | return container_of(dev, struct dsaf_device, ae_dev); | |
54 | } | |
55 | ||
56 | static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle) | |
57 | { | |
58 | int ppe_index; | |
59 | int ppe_common_index; | |
60 | struct ppe_common_cb *ppe_comm; | |
61 | struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); | |
62 | ||
63 | if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) { | |
64 | ppe_index = vf_cb->port_index; | |
65 | ppe_common_index = 0; | |
66 | } else { | |
67 | ppe_index = 0; | |
68 | ppe_common_index = | |
69 | vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1; | |
70 | } | |
71 | ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index]; | |
72 | return &ppe_comm->ppe_cb[ppe_index]; | |
73 | } | |
74 | ||
75 | static int hns_ae_get_q_num_per_vf( | |
76 | struct dsaf_device *dsaf_dev, int port) | |
77 | { | |
78 | int common_idx = hns_dsaf_get_comm_idx_by_port(port); | |
79 | ||
80 | return dsaf_dev->rcb_common[common_idx]->max_q_per_vf; | |
81 | } | |
82 | ||
83 | static int hns_ae_get_vf_num_per_port( | |
84 | struct dsaf_device *dsaf_dev, int port) | |
85 | { | |
86 | int common_idx = hns_dsaf_get_comm_idx_by_port(port); | |
87 | ||
88 | return dsaf_dev->rcb_common[common_idx]->max_vfn; | |
89 | } | |
90 | ||
/*
 * First ring pair belonging to @port.  Each service port owns
 * vf_num * q_num consecutive ring pairs in its rcb_common; a
 * debug-mode dsaf has a single port whose rings start at slot 0.
 */
static struct ring_pair_cb *hns_ae_get_base_ring_pair(
	struct dsaf_device *dsaf_dev, int port)
{
	int common_idx = hns_dsaf_get_comm_idx_by_port(port);
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx];
	int q_num = rcb_comm->max_q_per_vf;
	int vf_num = rcb_comm->max_vfn;

	if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
		return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
	else
		return &rcb_comm->ring_pair_cb[0];
}
104 | ||
105 | static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q) | |
106 | { | |
107 | return container_of(q, struct ring_pair_cb, q); | |
108 | } | |
109 | ||
/**
 * hns_ae_get_handle - allocate and initialize an AE handle for one enet port
 * @dev: the hnae AE device (embedded in a dsaf_device)
 * @port_id: enet port id
 *
 * Allocates a vf_cb with a trailing queue-pointer array, claims the first
 * free group of ring pairs on the port for this vf, and copies MAC/port
 * information into the embedded handle.
 *
 * Return: the embedded hnae_handle on success, ERR_PTR(-ENOMEM) on
 * allocation failure, ERR_PTR(-EINVAL) when no free vf slot remains.
 */
struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
				      u32 port_id)
{
	int port_idx;
	int vfnum_per_port;
	int qnum_per_vf;
	int i;
	struct dsaf_device *dsaf_dev;
	struct hnae_handle *ae_handle;
	struct ring_pair_cb *ring_pair_cb;
	struct hnae_vf_cb *vf_cb;

	dsaf_dev = hns_ae_get_dsaf_dev(dev);
	port_idx = hns_ae_map_eport_to_dport(port_id);

	ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx);
	vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx);
	qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx);

	/* one allocation: vf_cb followed by qnum_per_vf queue pointers */
	vf_cb = kzalloc(sizeof(*vf_cb) +
			qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
	if (unlikely(!vf_cb)) {
		dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
		ae_handle = ERR_PTR(-ENOMEM);
		goto handle_err;
	}
	ae_handle = &vf_cb->ae_handle;
	/* ae_handle Init */
	ae_handle->owner_dev = dsaf_dev->dev;
	ae_handle->dev = dev;
	ae_handle->q_num = qnum_per_vf;

	/* find ring pair, and set vf id*/
	for (ae_handle->vf_id = 0;
	     ae_handle->vf_id < vfnum_per_port; ae_handle->vf_id++) {
		if (!ring_pair_cb->used_by_vf)
			break;
		/* skip over this vf's whole group of ring pairs */
		ring_pair_cb += qnum_per_vf;
	}
	if (ae_handle->vf_id >= vfnum_per_port) {
		dev_err(dsaf_dev->dev, "malloc queue fail!\n");
		ae_handle = ERR_PTR(-EINVAL);
		goto vf_id_err;
	}

	/* qs[] lives immediately after the handle (see kzalloc above) */
	ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
	for (i = 0; i < qnum_per_vf; i++) {
		ae_handle->qs[i] = &ring_pair_cb->q;
		ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
		ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];

		/* mark claimed so later callers skip this group */
		ring_pair_cb->used_by_vf = 1;
		ring_pair_cb++;
	}

	vf_cb->dsaf_dev = dsaf_dev;
	vf_cb->port_index = port_idx;
	vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx];

	ae_handle->phy_if = vf_cb->mac_cb->phy_if;
	ae_handle->phy_node = vf_cb->mac_cb->phy_node;
	ae_handle->if_support = vf_cb->mac_cb->if_support;
	ae_handle->port_type = vf_cb->mac_cb->mac_type;
	ae_handle->dport_id = port_idx;

	return ae_handle;
vf_id_err:
	kfree(vf_cb);
handle_err:
	return ae_handle;
}
181 | ||
182 | static void hns_ae_put_handle(struct hnae_handle *handle) | |
183 | { | |
184 | struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); | |
185 | int i; | |
186 | ||
187 | vf_cb->mac_cb = NULL; | |
188 | ||
189 | kfree(vf_cb); | |
190 | ||
191 | for (i = 0; i < handle->q_num; i++) | |
192 | hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; | |
193 | } | |
194 | ||
195 | static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) | |
196 | { | |
197 | int q_num = handle->q_num; | |
198 | int i; | |
199 | ||
200 | for (i = 0; i < q_num; i++) | |
201 | hns_rcb_ring_enable_hw(handle->qs[i], val); | |
202 | } | |
203 | ||
204 | static void hns_ae_init_queue(struct hnae_queue *q) | |
205 | { | |
206 | struct ring_pair_cb *ring = | |
207 | container_of(q, struct ring_pair_cb, q); | |
208 | ||
209 | hns_rcb_init_hw(ring); | |
210 | } | |
211 | ||
212 | static void hns_ae_fini_queue(struct hnae_queue *q) | |
213 | { | |
214 | struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle); | |
215 | ||
216 | if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE) | |
217 | hns_rcb_reset_ring_hw(q); | |
218 | } | |
219 | ||
220 | static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p) | |
221 | { | |
222 | int ret; | |
223 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); | |
224 | ||
225 | if (!p || !is_valid_ether_addr((const u8 *)p)) { | |
226 | dev_err(handle->owner_dev, "is not valid ether addr !\n"); | |
227 | return -EADDRNOTAVAIL; | |
228 | } | |
229 | ||
230 | ret = hns_mac_change_vf_addr(mac_cb, handle->vf_id, p); | |
231 | if (ret != 0) { | |
232 | dev_err(handle->owner_dev, | |
233 | "set_mac_address fail, ret=%d!\n", ret); | |
234 | return ret; | |
235 | } | |
236 | ||
237 | return 0; | |
238 | } | |
239 | ||
/**
 * hns_ae_set_multicast_one - add one multicast address for this port
 * @handle: AE handle
 * @addr: the 6-byte multicast address
 *
 * Only service ports are programmed; debug ports return 0 untouched.
 * The address is installed twice: once for the port's own MAC id and
 * once for the inner (DSAF-internal) port.
 *
 * Return: 0 on success or the MAC-layer error code.
 */
static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
{
	int ret;
	char *mac_addr = (char *)addr;
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	assert(mac_cb);

	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
		return 0;

	ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, true);
	if (ret) {
		dev_err(handle->owner_dev,
			"mac add mul_mac:%pM port%d fail, ret = %#x!\n",
			mac_addr, mac_cb->mac_id, ret);
		return ret;
	}

	/* mirror the entry on the DSAF inner port */
	ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM,
				mac_addr, true);
	if (ret)
		dev_err(handle->owner_dev,
			"mac add mul_mac:%pM port%d fail, ret = %#x!\n",
			mac_addr, DSAF_BASE_INNER_PORT_NUM, ret);

	return ret;
}
268 | ||
/* Forward an MTU change to the MAC layer; returns its status. */
static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
{
	return hns_mac_set_mtu(hns_get_mac_cb(handle), new_mtu);
}
275 | ||
64353af6 S |
/* Toggle TSO accounting in the PPE serving this handle. */
static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
{
	hns_ppe_set_tso_enable(hns_get_ppe_cb(handle), enable);
}
282 | ||
/**
 * hns_ae_start - bring the port up
 * @handle: AE handle
 *
 * Enables broadcast for vm 0, enables all rings, waits for the hardware
 * to settle, then starts the MAC.
 *
 * Return: 0 on success or the error from the broadcast-enable step.
 */
static int hns_ae_start(struct hnae_handle *handle)
{
	int ret;
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	ret = hns_mac_vm_config_bc_en(mac_cb, 0, true);
	if (ret)
		return ret;

	hns_ae_ring_enable_all(handle, 1);
	/* settle time before starting the MAC -- TODO confirm requirement */
	msleep(100);

	hns_mac_start(mac_cb);

	return 0;
}
299 | ||
/**
 * hns_ae_stop - bring the port down
 * @handle: AE handle
 *
 * Drains pending tx descriptors, stops the MAC, disables all rings and
 * finally turns broadcast off for vm 0.  The ordering (drain, delay,
 * MAC stop, delay, ring disable) mirrors hns_ae_start() in reverse.
 */
void hns_ae_stop(struct hnae_handle *handle)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	/* just clean tx fbd, neednot rx fbd*/
	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);

	msleep(20);

	hns_mac_stop(mac_cb);

	usleep_range(10000, 20000);

	hns_ae_ring_enable_all(handle, 0);

	/* best-effort: broadcast-disable failure is ignored on teardown */
	(void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
}
317 | ||
/**
 * hns_ae_reset - reset a debug port's MAC and PPE
 * @handle: AE handle
 *
 * Only debug ports are reset here; service ports are left untouched.
 * The ppe_common index derivation matches hns_get_ppe_cb().
 */
static void hns_ae_reset(struct hnae_handle *handle)
{
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);

	if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
		u8 ppe_common_index =
			vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;

		hns_mac_reset(vf_cb->mac_cb);
		hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
	}
}
330 | ||
331 | void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask) | |
332 | { | |
333 | u32 flag; | |
334 | ||
335 | if (is_tx_ring(ring)) | |
336 | flag = RCB_INT_FLAG_TX; | |
337 | else | |
338 | flag = RCB_INT_FLAG_RX; | |
339 | ||
511e6bc0 | 340 | hns_rcb_int_ctrl_hw(ring->q, flag, mask); |
341 | } | |
342 | ||
13ac695e S |
343 | static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask) |
344 | { | |
345 | u32 flag; | |
346 | ||
347 | if (is_tx_ring(ring)) | |
348 | flag = RCB_INT_FLAG_TX; | |
349 | else | |
350 | flag = RCB_INT_FLAG_RX; | |
351 | ||
352 | hns_rcbv2_int_ctrl_hw(ring->q, flag, mask); | |
353 | } | |
354 | ||
/**
 * hns_ae_toggle_queue_status - start/stop one queue
 * @queue: the queue to toggle
 * @val: nonzero to start, zero to stop
 *
 * Clears any pending tx/rx interrupts first (using the register layout
 * that matches the dsaf hardware version), then flips the queue state.
 */
static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val)
{
	struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(queue->dev);

	if (AE_IS_VER1(dsaf_dev->dsaf_ver))
		hns_rcb_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
	else
		hns_rcbv2_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);

	hns_rcb_start(queue, val);
}
366 | ||
367 | static int hns_ae_get_link_status(struct hnae_handle *handle) | |
368 | { | |
369 | u32 link_status; | |
370 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); | |
371 | ||
372 | hns_mac_get_link_status(mac_cb, &link_status); | |
373 | ||
374 | return !!link_status; | |
375 | } | |
376 | ||
377 | static int hns_ae_get_mac_info(struct hnae_handle *handle, | |
378 | u8 *auto_neg, u16 *speed, u8 *duplex) | |
379 | { | |
380 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); | |
381 | ||
382 | return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); | |
383 | } | |
384 | ||
/* Push a new speed/duplex setting down to the MAC. */
static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
			       int duplex)
{
	hns_mac_adjust_link(hns_get_mac_cb(handle), speed, duplex);
}
392 | ||
393 | static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, | |
394 | u32 *uplimit) | |
395 | { | |
396 | *uplimit = HNS_RCB_RING_MAX_PENDING_BD; | |
397 | } | |
398 | ||
/**
 * hns_ae_get_pauseparam - read the current pause configuration
 * @handle: AE handle
 * @auto_neg: out, autonegotiation state
 * @rx_en: out, rx pause state
 * @tx_en: out, tx pause state
 *
 * Starts from the MAC's view; for service ports the rx pause state is
 * then overridden with the DSAF's value, since pause for those ports is
 * implemented in the DSAF fabric rather than the MAC.
 */
static void hns_ae_get_pauseparam(struct hnae_handle *handle,
				  u32 *auto_neg, u32 *rx_en, u32 *tx_en)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;

	hns_mac_get_autoneg(mac_cb, auto_neg);

	hns_mac_get_pauseparam(mac_cb, rx_en, tx_en);

	/* Service port's pause feature is provided by DSAF, not mac */
	if (handle->port_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en);
}
413 | ||
414 | static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable) | |
415 | { | |
416 | assert(handle); | |
417 | ||
418 | return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable); | |
419 | } | |
420 | ||
4568637f | 421 | static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en) |
422 | { | |
d5679849 KY |
423 | struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); |
424 | ||
4568637f | 425 | hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en); |
d5679849 | 426 | hns_mac_set_promisc(mac_cb, (u8)!!en); |
4568637f | 427 | } |
428 | ||
511e6bc0 | 429 | static int hns_ae_get_autoneg(struct hnae_handle *handle) |
430 | { | |
431 | u32 auto_neg; | |
432 | ||
433 | assert(handle); | |
434 | ||
435 | hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg); | |
436 | ||
437 | return auto_neg; | |
438 | } | |
439 | ||
/**
 * hns_ae_set_pauseparam - configure autoneg and pause
 * @handle: AE handle
 * @autoneg: autonegotiation enable
 * @rx_en: rx pause enable
 * @tx_en: tx pause enable
 *
 * For service ports, rx pause is programmed into the DSAF and then
 * zeroed before the MAC call so the MAC does not also act on it; the
 * MAC always receives the tx setting.
 *
 * Return: 0 on success or the first failing layer's error code.
 */
static int hns_ae_set_pauseparam(struct hnae_handle *handle,
				 u32 autoneg, u32 rx_en, u32 tx_en)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
	int ret;

	ret = hns_mac_set_autoneg(mac_cb, autoneg);
	if (ret)
		return ret;

	/* Service port's pause feature is provided by DSAF, not mac */
	if (handle->port_type == HNAE_PORT_SERVICE) {
		ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev,
						   mac_cb->mac_id, rx_en);
		if (ret)
			return ret;
		rx_en = 0;
	}
	return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
}
461 | ||
462 | static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle, | |
463 | u32 *tx_usecs, u32 *rx_usecs) | |
464 | { | |
43adc067 L |
465 | struct ring_pair_cb *ring_pair = |
466 | container_of(handle->qs[0], struct ring_pair_cb, q); | |
511e6bc0 | 467 | |
43adc067 L |
468 | *tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common, |
469 | ring_pair->port_id_in_comm); | |
470 | *rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common, | |
471 | ring_pair->port_id_in_comm); | |
511e6bc0 | 472 | } |
473 | ||
474 | static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle, | |
475 | u32 *tx_frames, u32 *rx_frames) | |
476 | { | |
43adc067 L |
477 | struct ring_pair_cb *ring_pair = |
478 | container_of(handle->qs[0], struct ring_pair_cb, q); | |
511e6bc0 | 479 | |
43adc067 L |
480 | *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common, |
481 | ring_pair->port_id_in_comm); | |
482 | *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common, | |
483 | ring_pair->port_id_in_comm); | |
511e6bc0 | 484 | } |
485 | ||
9832ce4c L |
486 | static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle, |
487 | u32 timeout) | |
511e6bc0 | 488 | { |
43adc067 L |
489 | struct ring_pair_cb *ring_pair = |
490 | container_of(handle->qs[0], struct ring_pair_cb, q); | |
511e6bc0 | 491 | |
9832ce4c | 492 | return hns_rcb_set_coalesce_usecs( |
43adc067 | 493 | ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout); |
511e6bc0 | 494 | } |
495 | ||
496 | static int hns_ae_set_coalesce_frames(struct hnae_handle *handle, | |
497 | u32 coalesce_frames) | |
498 | { | |
43adc067 L |
499 | struct ring_pair_cb *ring_pair = |
500 | container_of(handle->qs[0], struct ring_pair_cb, q); | |
511e6bc0 | 501 | |
43adc067 L |
502 | return hns_rcb_set_coalesced_frames( |
503 | ring_pair->rcb_common, | |
504 | ring_pair->port_id_in_comm, coalesce_frames); | |
511e6bc0 | 505 | } |
506 | ||
/**
 * hns_ae_update_stats - refresh and aggregate statistics into @net_stats
 * @handle: AE handle
 * @net_stats: netdev stats structure to fill
 *
 * Pulls fresh counters from the rings, the PPE, (for service ports) the
 * DSAF, and the MAC, then sums them into the standard netdev fields.
 */
void hns_ae_update_stats(struct hnae_handle *handle,
			 struct net_device_stats *net_stats)
{
	int port;
	int idx;
	struct dsaf_device *dsaf_dev;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	struct hnae_queue *queue;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
	u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
	u64 rx_missed_errors = 0;

	dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
	if (!dsaf_dev)
		return;
	port = vf_cb->port_index;
	ppe_cb = hns_get_ppe_cb(handle);
	mac_cb = hns_get_mac_cb(handle);

	/* per-ring byte/packet/error counters */
	for (idx = 0; idx < handle->q_num; idx++) {
		queue = handle->qs[idx];
		hns_rcb_update_stats(queue);

		tx_bytes += queue->tx_ring.stats.tx_bytes;
		tx_packets += queue->tx_ring.stats.tx_pkts;
		rx_bytes += queue->rx_ring.stats.rx_bytes;
		rx_packets += queue->rx_ring.stats.rx_pkts;

		rx_errors += queue->rx_ring.stats.err_pkt_len
				+ queue->rx_ring.stats.l2_err
				+ queue->rx_ring.stats.l3l4_csum_err;
	}

	/* PPE contributes drops (no buffer) and tx checksum/fifo errors */
	hns_ppe_update_stats(ppe_cb);
	rx_missed_errors = ppe_cb->hw_stats.rx_drop_no_buf;
	tx_errors += ppe_cb->hw_stats.tx_err_checksum
		+ ppe_cb->hw_stats.tx_err_fifo_empty;

	if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
		hns_dsaf_update_stats(dsaf_dev, port);
		/* for port upline direction, i.e., rx. */
		rx_missed_errors += dsaf_dev->hw_stats[port].bp_drop;
		rx_missed_errors += dsaf_dev->hw_stats[port].pad_drop;
		rx_missed_errors += dsaf_dev->hw_stats[port].crc_false;

		/* for port downline direction, i.e., tx. */
		port = port + DSAF_PPE_INODE_BASE;
		hns_dsaf_update_stats(dsaf_dev, port);
		tx_dropped += dsaf_dev->hw_stats[port].bp_drop;
		tx_dropped += dsaf_dev->hw_stats[port].pad_drop;
		tx_dropped += dsaf_dev->hw_stats[port].crc_false;
		tx_dropped += dsaf_dev->hw_stats[port].rslt_drop;
		tx_dropped += dsaf_dev->hw_stats[port].vlan_drop;
		tx_dropped += dsaf_dev->hw_stats[port].stp_drop;
	}

	/* MAC-level error counters */
	hns_mac_update_stats(mac_cb);
	rx_errors += mac_cb->hw_stats.rx_fifo_overrun_err;

	tx_errors += mac_cb->hw_stats.tx_bad_pkts
		+ mac_cb->hw_stats.tx_fragment_err
		+ mac_cb->hw_stats.tx_jabber_err
		+ mac_cb->hw_stats.tx_underrun_err
		+ mac_cb->hw_stats.tx_crc_err;

	net_stats->tx_bytes = tx_bytes;
	net_stats->tx_packets = tx_packets;
	net_stats->rx_bytes = rx_bytes;
	net_stats->rx_dropped = 0;
	net_stats->rx_packets = rx_packets;
	net_stats->rx_errors = rx_errors;
	net_stats->tx_errors = tx_errors;
	net_stats->tx_dropped = tx_dropped;
	net_stats->rx_missed_errors = rx_missed_errors;
	net_stats->rx_crc_errors = mac_cb->hw_stats.rx_fcs_err;
	net_stats->rx_frame_errors = mac_cb->hw_stats.rx_align_err;
	net_stats->rx_fifo_errors = mac_cb->hw_stats.rx_fifo_overrun_err;
	net_stats->rx_length_errors = mac_cb->hw_stats.rx_len_err;
	net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
}
589 | ||
/**
 * hns_ae_get_stats - copy ethtool statistics into @data
 * @handle: AE handle
 * @data: output array of u64 counters
 *
 * Fills @data in the same order hns_ae_get_strings() emits names:
 * per-ring, then PPE, then MAC, then (service ports only) DSAF.
 * @data must hold hns_ae_get_sset_count(handle, ETH_SS_STATS) entries.
 */
void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
{
	int idx;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	u64 *p = data;
	struct hnae_vf_cb *vf_cb;

	if (!handle || !data) {
		pr_err("hns_ae_get_stats NULL handle or data pointer!\n");
		return;
	}

	vf_cb = hns_ae_get_vf_cb(handle);
	mac_cb = hns_get_mac_cb(handle);
	ppe_cb = hns_get_ppe_cb(handle);

	for (idx = 0; idx < handle->q_num; idx++) {
		hns_rcb_get_stats(handle->qs[idx], p);
		p += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
	}

	hns_ppe_get_stats(ppe_cb, p);
	p += hns_ppe_get_sset_count((int)ETH_SS_STATS);

	hns_mac_get_stats(mac_cb, p);
	p += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);

	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
}
621 | ||
/**
 * hns_ae_get_strings - emit ethtool stat names into @data
 * @handle: AE handle
 * @stringset: which string set (e.g. ETH_SS_STATS)
 * @data: output buffer, ETH_GSTRING_LEN bytes per name
 *
 * Order matches hns_ae_get_stats(): per-ring, PPE, MAC, then (service
 * ports only) DSAF strings.
 */
void hns_ae_get_strings(struct hnae_handle *handle,
			u32 stringset, u8 *data)
{
	int port;
	int idx;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	u8 *p = data;
	struct hnae_vf_cb *vf_cb;

	assert(handle);

	vf_cb = hns_ae_get_vf_cb(handle);
	port = vf_cb->port_index;
	mac_cb = hns_get_mac_cb(handle);
	ppe_cb = hns_get_ppe_cb(handle);

	for (idx = 0; idx < handle->q_num; idx++) {
		hns_rcb_get_strings(stringset, p, idx);
		p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
	}

	hns_ppe_get_strings(ppe_cb, stringset, p);
	p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);

	hns_mac_get_strings(mac_cb, stringset, p);
	p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);

	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_strings(stringset, p, port);
}
653 | ||
654 | int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset) | |
655 | { | |
656 | u32 sset_count = 0; | |
657 | struct hns_mac_cb *mac_cb; | |
658 | ||
659 | assert(handle); | |
660 | ||
661 | mac_cb = hns_get_mac_cb(handle); | |
662 | ||
663 | sset_count += hns_rcb_get_ring_sset_count(stringset) * handle->q_num; | |
664 | sset_count += hns_ppe_get_sset_count(stringset); | |
665 | sset_count += hns_mac_get_sset_count(mac_cb, stringset); | |
666 | ||
667 | if (mac_cb->mac_type == HNAE_PORT_SERVICE) | |
668 | sset_count += hns_dsaf_get_sset_count(stringset); | |
669 | ||
670 | return sset_count; | |
671 | } | |
672 | ||
/**
 * hns_ae_config_loopback - enable/disable a loopback mode
 * @handle: AE handle
 * @loop: which loopback (PHY, SERDES or MAC internal)
 * @en: nonzero to enable
 *
 * PHY loopback is handled elsewhere and succeeds trivially here.  On
 * success the DSAF inner-loopback setting is updated to match.
 *
 * Return: 0 on success, -EINVAL for an unknown mode, or the MAC error.
 */
static int hns_ae_config_loopback(struct hnae_handle *handle,
				  enum hnae_loop loop, int en)
{
	int ret;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	switch (loop) {
	case MAC_INTERNALLOOP_PHY:
		ret = 0;
		break;
	case MAC_INTERNALLOOP_SERDES:
		ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
		break;
	case MAC_INTERNALLOOP_MAC:
		ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en);

	return ret;
}
699 | ||
700 | void hns_ae_update_led_status(struct hnae_handle *handle) | |
701 | { | |
702 | struct hns_mac_cb *mac_cb; | |
703 | ||
704 | assert(handle); | |
705 | mac_cb = hns_get_mac_cb(handle); | |
706 | if (!mac_cb->cpld_vaddr) | |
707 | return; | |
708 | hns_set_led_opt(mac_cb); | |
709 | } | |
710 | ||
711 | int hns_ae_cpld_set_led_id(struct hnae_handle *handle, | |
712 | enum hnae_led_state status) | |
713 | { | |
714 | struct hns_mac_cb *mac_cb; | |
715 | ||
716 | assert(handle); | |
717 | ||
718 | mac_cb = hns_get_mac_cb(handle); | |
719 | ||
720 | return hns_cpld_led_set_id(mac_cb, status); | |
721 | } | |
722 | ||
/**
 * hns_ae_get_regs - dump register state for ethtool
 * @handle: AE handle
 * @data: output buffer of u32 registers
 *
 * Emits PPE, common RCB, per-ring, MAC and (service ports only) DSAF
 * registers, in the same order hns_ae_get_regs_len() counts them.
 */
void hns_ae_get_regs(struct hnae_handle *handle, void *data)
{
	u32 *p = data;
	u32 rcb_com_idx;
	int i;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);

	hns_ppe_get_regs(ppe_cb, p);
	p += hns_ppe_get_regs_count();

	rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index);
	hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p);
	p += hns_rcb_get_common_regs_count();

	for (i = 0; i < handle->q_num; i++) {
		hns_rcb_get_ring_regs(handle->qs[i], p);
		p += hns_rcb_get_ring_regs_count();
	}

	hns_mac_get_regs(vf_cb->mac_cb, p);
	p += hns_mac_get_regs_count(vf_cb->mac_cb);

	if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
}
749 | ||
750 | int hns_ae_get_regs_len(struct hnae_handle *handle) | |
751 | { | |
752 | u32 total_num; | |
753 | struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); | |
754 | ||
755 | total_num = hns_ppe_get_regs_count(); | |
756 | total_num += hns_rcb_get_common_regs_count(); | |
757 | total_num += hns_rcb_get_ring_regs_count() * handle->q_num; | |
758 | total_num += hns_mac_get_regs_count(vf_cb->mac_cb); | |
759 | ||
760 | if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE) | |
761 | total_num += hns_dsaf_get_regs_count(); | |
762 | ||
763 | return total_num; | |
764 | } | |
765 | ||
6bc0ce7d S |
766 | static u32 hns_ae_get_rss_key_size(struct hnae_handle *handle) |
767 | { | |
768 | return HNS_PPEV2_RSS_KEY_SIZE; | |
769 | } | |
770 | ||
771 | static u32 hns_ae_get_rss_indir_size(struct hnae_handle *handle) | |
772 | { | |
773 | return HNS_PPEV2_RSS_IND_TBL_SIZE; | |
774 | } | |
775 | ||
/**
 * hns_ae_get_rss - report RSS configuration to ethtool
 * @handle: AE handle
 * @indir: out, indirection table (may be read unconditionally — callers
 *	are expected to pass a valid buffer)
 * @key: out, hash key (optional)
 * @hfunc: out, hash function id (optional)
 *
 * Return: always 0.
 */
static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);

	/* currently we support only one type of hash function i.e. Toep hash */
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	/* get the RSS Key required by the user */
	if (key)
		memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);

	/* update the current hash->queue mappings from the shadow RSS table */
	memcpy(indir, ppe_cb->rss_indir_table,
	       HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));

	return 0;
}
795 | ||
/**
 * hns_ae_set_rss - apply RSS configuration from ethtool
 * @handle: AE handle
 * @indir: new indirection table (copied into the shadow, then pushed to
 *	hardware)
 * @key: new hash key, or NULL to keep the current one
 * @hfunc: requested hash function (not validated here)
 *
 * Return: always 0.
 */
static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);

	/* set the RSS Hash Key if specififed by the user */
	if (key)
		hns_ppe_set_rss_key(ppe_cb, (u32 *)key);

	/* update the shadow RSS table with user specified qids */
	memcpy(ppe_cb->rss_indir_table, indir,
	       HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));

	/* now update the hardware */
	hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);

	return 0;
}
814 | ||
/*
 * AE operations table registered with the hnae framework.
 * Note: toggle_ring_irq is overwritten at init time by
 * hns_dsaf_ae_init() according to the dsaf hardware version, which is
 * why this struct is not const.
 */
static struct hnae_ae_ops hns_dsaf_ops = {
	.get_handle = hns_ae_get_handle,
	.put_handle = hns_ae_put_handle,
	.init_queue = hns_ae_init_queue,
	.fini_queue = hns_ae_fini_queue,
	.start = hns_ae_start,
	.stop = hns_ae_stop,
	.reset = hns_ae_reset,
	.toggle_ring_irq = hns_ae_toggle_ring_irq,
	.toggle_queue_status = hns_ae_toggle_queue_status,
	.get_status = hns_ae_get_link_status,
	.get_info = hns_ae_get_mac_info,
	.adjust_link = hns_ae_adjust_link,
	.set_loopback = hns_ae_config_loopback,
	.get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
	.get_pauseparam = hns_ae_get_pauseparam,
	.set_autoneg = hns_ae_set_autoneg,
	.get_autoneg = hns_ae_get_autoneg,
	.set_pauseparam = hns_ae_set_pauseparam,
	.get_coalesce_usecs = hns_ae_get_coalesce_usecs,
	.get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
	.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
	.set_coalesce_frames = hns_ae_set_coalesce_frames,
	.set_promisc_mode = hns_ae_set_promisc_mode,
	.set_mac_addr = hns_ae_set_mac_address,
	.set_mc_addr = hns_ae_set_multicast_one,
	.set_mtu = hns_ae_set_mtu,
	.update_stats = hns_ae_update_stats,
	.set_tso_stats = hns_ae_set_tso_stats,
	.get_stats = hns_ae_get_stats,
	.get_strings = hns_ae_get_strings,
	.get_sset_count = hns_ae_get_sset_count,
	.update_led_status = hns_ae_update_led_status,
	.set_led_id = hns_ae_cpld_set_led_id,
	.get_regs = hns_ae_get_regs,
	.get_regs_len = hns_ae_get_regs_len,
	.get_rss_key_size = hns_ae_get_rss_key_size,
	.get_rss_indir_size = hns_ae_get_rss_indir_size,
	.get_rss = hns_ae_get_rss,
	.set_rss = hns_ae_set_rss
};
856 | ||
/**
 * hns_dsaf_ae_init - register this dsaf device with the hnae framework
 * @dsaf_dev: the dsaf device
 *
 * Patches the shared ops table's toggle_ring_irq for the detected
 * hardware version, assigns a unique device name, and registers.
 *
 * Return: the hnae_ae_register() status.
 */
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
{
	struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
	/* monotonically increasing device-name suffix across all dsafs */
	static atomic_t id = ATOMIC_INIT(-1);

	switch (dsaf_dev->dsaf_ver) {
	case AE_VERSION_1:
		hns_dsaf_ops.toggle_ring_irq = hns_ae_toggle_ring_irq;
		break;
	case AE_VERSION_2:
		hns_dsaf_ops.toggle_ring_irq = hns_aev2_toggle_ring_irq;
		break;
	default:
		break;
	}

	snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME,
		 (int)atomic_inc_return(&id));
	ae_dev->ops = &hns_dsaf_ops;
	ae_dev->dev = dsaf_dev->dev;

	return hnae_ae_register(ae_dev, THIS_MODULE);
}
880 | ||
/* Unregister this dsaf device from the hnae framework. */
void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev)
{
	hnae_ae_unregister(&dsaf_dev->ae_dev);
}