/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1

#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
        (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))

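/* fill_v2_desc - fill one v2 hardware TX descriptor for a buffer (head or
 * fragment); when the buffer carries an skb with CHECKSUM_PARTIAL set, it
 * also configures the checksum-offload and TSO fields of the descriptor.
 */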
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
                         int size, dma_addr_t dma, int frag_end,
                         int buf_num, enum hns_desc_type type, int mtu)
{
        struct hnae_desc *desc = &ring->desc[ring->next_to_use];
        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
        struct iphdr *iphdr;
        struct ipv6hdr *ipv6hdr;
        struct sk_buff *skb;
        int skb_tmp_len;
        __be16 protocol;
        u8 bn_pid = 0;
        u8 rrcfv = 0;
        u8 ip_offset = 0;
        u8 tvsvsn = 0;
        u16 mss = 0;
        u8 l4_len = 0;
        u16 paylen = 0;

        desc_cb->priv = priv;
        desc_cb->length = size;
        desc_cb->dma = dma;
        desc_cb->type = type;

        desc->addr = cpu_to_le64(dma);
        desc->tx.send_size = cpu_to_le16((u16)size);

        /* config bd buffer end */
        hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
        hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

        if (type == DESC_TYPE_SKB) {
                skb = (struct sk_buff *)priv;

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        skb_reset_mac_len(skb);
                        protocol = skb->protocol;
                        ip_offset = ETH_HLEN;

                        if (protocol == htons(ETH_P_8021Q)) {
                                ip_offset += VLAN_HLEN;
                                protocol = vlan_get_protocol(skb);
                                skb->protocol = protocol;
                        }

                        if (skb->protocol == htons(ETH_P_IP)) {
                                iphdr = ip_hdr(skb);
                                hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
                                hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

                                /* check for tcp/udp header */
                                if (iphdr->protocol == IPPROTO_TCP) {
                                        hnae_set_bit(tvsvsn,
                                                     HNSV2_TXD_TSE_B, 1);
                                        skb_tmp_len = SKB_TMP_LEN(skb);
                                        l4_len = tcp_hdrlen(skb);
                                        mss = mtu - skb_tmp_len - ETH_FCS_LEN;
                                        paylen = skb->len - skb_tmp_len;
                                }
                        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                                hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
                                ipv6hdr = ipv6_hdr(skb);
                                hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

                                /* check for tcp/udp header */
                                if (ipv6hdr->nexthdr == IPPROTO_TCP) {
                                        hnae_set_bit(tvsvsn,
                                                     HNSV2_TXD_TSE_B, 1);
                                        skb_tmp_len = SKB_TMP_LEN(skb);
                                        l4_len = tcp_hdrlen(skb);
                                        mss = mtu - skb_tmp_len - ETH_FCS_LEN;
                                        paylen = skb->len - skb_tmp_len;
                                }
                        }
                        desc->tx.ip_offset = ip_offset;
                        desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
                        desc->tx.mss = cpu_to_le16(mss);
                        desc->tx.l4_len = l4_len;
                        desc->tx.paylen = cpu_to_le16(paylen);
                }
        }

        hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

        desc->tx.bn_pid = bn_pid;
        desc->tx.ra_ri_cs_fe_vld = rrcfv;

        ring_ptr_move_fw(ring, next_to_use);
}

static void fill_desc(struct hnae_ring *ring, void *priv,
                      int size, dma_addr_t dma, int frag_end,
                      int buf_num, enum hns_desc_type type, int mtu)
{
        struct hnae_desc *desc = &ring->desc[ring->next_to_use];
        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
        struct sk_buff *skb;
        __be16 protocol;
        u32 ip_offset;
        u32 asid_bufnum_pid = 0;
        u32 flag_ipoffset = 0;

        desc_cb->priv = priv;
        desc_cb->length = size;
        desc_cb->dma = dma;
        desc_cb->type = type;

        desc->addr = cpu_to_le64(dma);
        desc->tx.send_size = cpu_to_le16((u16)size);

        /* config bd buffer end */
        flag_ipoffset |= 1 << HNS_TXD_VLD_B;

        asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

        if (type == DESC_TYPE_SKB) {
                skb = (struct sk_buff *)priv;

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        protocol = skb->protocol;
                        ip_offset = ETH_HLEN;

                        /* if it is a SW VLAN, check the next protocol */
                        if (protocol == htons(ETH_P_8021Q)) {
                                ip_offset += VLAN_HLEN;
                                protocol = vlan_get_protocol(skb);
                                skb->protocol = protocol;
                        }

                        if (skb->protocol == htons(ETH_P_IP)) {
                                flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
                                /* check for tcp/udp header */
                                flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

                        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                                /* IPv6 has no L3 checksum; check for the L4 header */
                                flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
                        }

                        flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
                }
        }

        flag_ipoffset |= frag_end << HNS_TXD_FE_B;

        desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
        desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

        ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
        ring_ptr_move_bw(ring, next_to_use);
}

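/* hns_nic_maybe_stop_tx - ensure the ring can hold the packet; if the skb
 * would need more descriptors than one packet may use, linearize it with
 * skb_copy() so it fits in a single buffer. Returns 0 with the buffer count
 * in *bnum, -EBUSY if the ring is full, or -ENOMEM if the copy fails.
 */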
static int hns_nic_maybe_stop_tx(
        struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
        struct sk_buff *skb = *out_skb;
        struct sk_buff *new_skb = NULL;
        int buf_num;

        /* no. of segments (plus a header) */
        buf_num = skb_shinfo(skb)->nr_frags + 1;

        if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
                if (ring_space(ring) < 1)
                        return -EBUSY;

                new_skb = skb_copy(skb, GFP_ATOMIC);
                if (!new_skb)
                        return -ENOMEM;

                dev_kfree_skb_any(skb);
                *out_skb = new_skb;
                buf_num = 1;
        } else if (buf_num > ring_space(ring)) {
                return -EBUSY;
        }

        *bnum = buf_num;
        return 0;
}

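/* hns_nic_maybe_stop_tso - like hns_nic_maybe_stop_tx, but counts one
 * descriptor per BD_MAX_SEND_SIZE chunk of the head and of every fragment,
 * since TSO buffers larger than the hardware limit are split across
 * descriptors by fill_tso_desc() below.
 */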
static int hns_nic_maybe_stop_tso(
        struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
        int i;
        int size;
        int buf_num;
        int frag_num;
        struct sk_buff *skb = *out_skb;
        struct sk_buff *new_skb = NULL;
        struct skb_frag_struct *frag;

        size = skb_headlen(skb);
        buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

        frag_num = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < frag_num; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                size = skb_frag_size(frag);
                buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
        }

        if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
                buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
                if (ring_space(ring) < buf_num)
                        return -EBUSY;
                /* manually split the packet to send */
                new_skb = skb_copy(skb, GFP_ATOMIC);
                if (!new_skb)
                        return -ENOMEM;
                dev_kfree_skb_any(skb);
                *out_skb = new_skb;

        } else if (ring_space(ring) < buf_num) {
                return -EBUSY;
        }

        *bnum = buf_num;
        return 0;
}

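/* fill_tso_desc - split one buffer into BD_MAX_SEND_SIZE-sized chunks and
 * fill one v2 descriptor per chunk; only the first chunk of an skb keeps
 * DESC_TYPE_SKB so the buffer is unmapped and freed exactly once.
 */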
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
                          int size, dma_addr_t dma, int frag_end,
                          int buf_num, enum hns_desc_type type, int mtu)
{
        int frag_buf_num;
        int sizeoflast;
        int k;

        frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
        sizeoflast = size % BD_MAX_SEND_SIZE;
        sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

        /* when the frag size is bigger than the hardware limit, split this frag */
        for (k = 0; k < frag_buf_num; k++)
                fill_v2_desc(ring, priv,
                             (k == frag_buf_num - 1) ?
                                        sizeoflast : BD_MAX_SEND_SIZE,
                             dma + BD_MAX_SEND_SIZE * k,
                             frag_end && (k == frag_buf_num - 1) ? 1 : 0,
                             buf_num,
                             (type == DESC_TYPE_SKB && !k) ?
                                        DESC_TYPE_SKB : DESC_TYPE_PAGE,
                             mtu);
}

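/* hns_nic_net_xmit_hw - map the skb head and fragments for DMA, fill a
 * descriptor chain, and kick the hardware queue; on a mapping failure every
 * descriptor filled so far is unwound and unmapped.
 */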
int hns_nic_net_xmit_hw(struct net_device *ndev,
                        struct sk_buff *skb,
                        struct hns_nic_ring_data *ring_data)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct device *dev = priv->dev;
        struct hnae_ring *ring = ring_data->ring;
        struct netdev_queue *dev_queue;
        struct skb_frag_struct *frag;
        int buf_num;
        int seg_num;
        dma_addr_t dma;
        int size, next_to_use;
        int i;

        switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
        case -EBUSY:
                ring->stats.tx_busy++;
                goto out_net_tx_busy;
        case -ENOMEM:
                ring->stats.sw_err_cnt++;
                netdev_err(ndev, "no memory to xmit!\n");
                goto out_err_tx_ok;
        default:
                break;
        }

        /* no. of segments (plus a header) */
        seg_num = skb_shinfo(skb)->nr_frags + 1;
        next_to_use = ring->next_to_use;

        /* fill the first part */
        size = skb_headlen(skb);
        dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma)) {
                netdev_err(ndev, "TX head DMA map failed\n");
                ring->stats.sw_err_cnt++;
                goto out_err_tx_ok;
        }
        priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
                            buf_num, DESC_TYPE_SKB, ndev->mtu);

        /* fill the fragments */
        for (i = 1; i < seg_num; i++) {
                frag = &skb_shinfo(skb)->frags[i - 1];
                size = skb_frag_size(frag);
                dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma)) {
                        netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
                        ring->stats.sw_err_cnt++;
                        goto out_map_frag_fail;
                }
                priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
                                    seg_num - 1 == i ? 1 : 0, buf_num,
                                    DESC_TYPE_PAGE, ndev->mtu);
        }

        /* the whole packet has been translated into descriptors */
        dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
        netdev_tx_sent_queue(dev_queue, skb->len);

        wmb(); /* commit all data before submit */
        assert(skb->queue_mapping < priv->ae_handle->q_num);
        hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
        ring->stats.tx_pkts++;
        ring->stats.tx_bytes += skb->len;

        return NETDEV_TX_OK;

out_map_frag_fail:

        while (ring->next_to_use != next_to_use) {
                unfill_desc(ring);
                if (ring->next_to_use != next_to_use)
                        dma_unmap_page(dev,
                                       ring->desc_cb[ring->next_to_use].dma,
                                       ring->desc_cb[ring->next_to_use].length,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(dev,
                                         ring->desc_cb[next_to_use].dma,
                                         ring->desc_cb[next_to_use].length,
                                         DMA_TO_DEVICE);
        }

out_err_tx_ok:

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;

out_net_tx_busy:

        netif_stop_subqueue(ndev, skb->queue_mapping);

        /* Herbert's original patch had:
         * smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it.
         */
        smp_mb();
        return NETDEV_TX_BUSY;
}

/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @flag: RX descriptor flags describing the VLAN/L3/L4 types
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
                                        unsigned int max_size)
{
        unsigned char *network;
        u8 hlen;

        /* this should never happen, but better safe than sorry */
        if (max_size < ETH_HLEN)
                return max_size;

        /* initialize network frame pointer */
        network = data;

        /* set first protocol and move network header forward */
        network += ETH_HLEN;

        /* handle any vlan tag if present */
        if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
                == HNS_RX_FLAG_VLAN_PRESENT) {
                if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
                        return max_size;

                network += VLAN_HLEN;
        }

        /* handle L3 protocols */
        if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
                == HNS_RX_FLAG_L3ID_IPV4) {
                if ((typeof(max_size))(network - data) >
                    (max_size - sizeof(struct iphdr)))
                        return max_size;

                /* access ihl as a u8 to avoid unaligned access on ia64 */
                hlen = (network[0] & 0x0F) << 2;

                /* verify hlen meets minimum size requirements */
                if (hlen < sizeof(struct iphdr))
                        return network - data;

                /* record next protocol if header is present */
        } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
                == HNS_RX_FLAG_L3ID_IPV6) {
                if ((typeof(max_size))(network - data) >
                    (max_size - sizeof(struct ipv6hdr)))
                        return max_size;

                /* record next protocol */
                hlen = sizeof(struct ipv6hdr);
        } else {
                return network - data;
        }

        /* relocate pointer to start of L4 header */
        network += hlen;

        /* finally sort out TCP/UDP */
        if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
                == HNS_RX_FLAG_L4ID_TCP) {
                if ((typeof(max_size))(network - data) >
                    (max_size - sizeof(struct tcphdr)))
                        return max_size;

                /* access doff as a u8 to avoid unaligned access on ia64 */
                hlen = (network[12] & 0xF0) >> 2;

                /* verify hlen meets minimum size requirements */
                if (hlen < sizeof(struct tcphdr))
                        return network - data;

                network += hlen;
        } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
                == HNS_RX_FLAG_L4ID_UDP) {
                if ((typeof(max_size))(network - data) >
                    (max_size - sizeof(struct udphdr)))
                        return max_size;

                network += sizeof(struct udphdr);
        }

        /* If everything has gone correctly network should be the
         * data section of the packet and will be the end of the header.
         * If not then it probably represents the end of the last recognized
         * header.
         */
        if ((typeof(max_size))(network - data) < max_size)
                return network - data;
        else
                return max_size;
}

static void
hns_nic_reuse_page(struct hnae_desc_cb *desc_cb, int tsize, int last_offset)
{
        /* avoid re-using remote pages; the reuse flag defaults to off */
        if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) {
                /* move offset up to the next cache line */
                desc_cb->page_offset += tsize;

                if (desc_cb->page_offset <= last_offset) {
                        desc_cb->reuse_flag = 1;
                        /* bump ref count on page before it is given */
                        get_page(desc_cb->priv);
                }
        }
}

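/* v2 hardware stores (buffer count - 1) in the BUFNUM field; v1 stores the
 * count itself.
 */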
static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
        *out_bnum = hnae_get_field(bnum_flag,
                                   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
        *out_bnum = hnae_get_field(bnum_flag,
                                   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}

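/* hns_nic_poll_rx_skb - assemble one received packet: copy (or pull) the
 * header into a fresh skb, attach any remaining buffers as page frags, and
 * validate the descriptor chain; returns a negative errno for bad BDs.
 */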
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
                               struct sk_buff **out_skb, int *out_bnum)
{
        struct hnae_ring *ring = ring_data->ring;
        struct net_device *ndev = ring_data->napi.dev;
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct sk_buff *skb;
        struct hnae_desc *desc;
        struct hnae_desc_cb *desc_cb;
        unsigned char *va;
        int bnum, length, size, i, truesize, last_offset;
        int pull_len;
        u32 bnum_flag;

        last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
        desc = &ring->desc[ring->next_to_clean];
        desc_cb = &ring->desc_cb[ring->next_to_clean];

        prefetch(desc);

        va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

        /* prefetch first cache line of first page */
        prefetch(va);
#if L1_CACHE_BYTES < 128
        prefetch(va + L1_CACHE_BYTES);
#endif

        skb = *out_skb = napi_alloc_skb(&ring_data->napi,
                                        HNS_RX_HEAD_SIZE);
        if (unlikely(!skb)) {
                netdev_err(ndev, "alloc rx skb fail\n");
                ring->stats.sw_err_cnt++;
                return -ENOMEM;
        }

        length = le16_to_cpu(desc->rx.pkt_len);
        bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
        priv->ops.get_rxd_bnum(bnum_flag, &bnum);
        *out_bnum = bnum;

        /* we will be copying header into skb->data in
         * pskb_may_pull so it is in our interest to prefetch
         * it now to avoid a possible cache miss
         */
        prefetchw(skb->data);

        if (length <= HNS_RX_HEAD_SIZE) {
                memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

                /* we can reuse buffer as-is, just make sure it is local */
                if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
                        desc_cb->reuse_flag = 1;
                else /* this page cannot be reused so discard it */
                        put_page(desc_cb->priv);

                ring_ptr_move_fw(ring, next_to_clean);

                if (unlikely(bnum != 1)) { /* check err */
                        *out_bnum = 1;
                        goto out_bnum_err;
                }
        } else {
                ring->stats.seg_pkt_cnt++;

                pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
                memcpy(__skb_put(skb, pull_len), va,
                       ALIGN(pull_len, sizeof(long)));

                size = le16_to_cpu(desc->rx.size);
                truesize = ALIGN(size, L1_CACHE_BYTES);
                skb_add_rx_frag(skb, 0, desc_cb->priv,
                                desc_cb->page_offset + pull_len,
                                size - pull_len, truesize - pull_len);

                hns_nic_reuse_page(desc_cb, truesize, last_offset);
                ring_ptr_move_fw(ring, next_to_clean);

                if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
                        *out_bnum = 1;
                        goto out_bnum_err;
                }
                for (i = 1; i < bnum; i++) {
                        desc = &ring->desc[ring->next_to_clean];
                        desc_cb = &ring->desc_cb[ring->next_to_clean];
                        size = le16_to_cpu(desc->rx.size);
                        truesize = ALIGN(size, L1_CACHE_BYTES);
                        skb_add_rx_frag(skb, i, desc_cb->priv,
                                        desc_cb->page_offset,
                                        size, truesize);

                        hns_nic_reuse_page(desc_cb, truesize, last_offset);
                        ring_ptr_move_fw(ring, next_to_clean);
                }
        }

        /* exception handling: free the skb and skip past its descriptors */
        if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
                *out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
                netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
                           bnum, ring->max_desc_num_per_pkt,
                           length, (int)MAX_SKB_FRAGS,
                           ((u64 *)desc)[0], ((u64 *)desc)[1]);
                ring->stats.err_bd_num++;
                dev_kfree_skb_any(skb);
                return -EDOM;
        }

        bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

        if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
                netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
                           ((u64 *)desc)[0], ((u64 *)desc)[1]);
                ring->stats.non_vld_descs++;
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }

        if (unlikely((!desc->rx.pkt_len) ||
                     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
                ring->stats.err_pkt_len++;
                dev_kfree_skb_any(skb);
                return -EFAULT;
        }

        if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
                ring->stats.l2_err++;
                dev_kfree_skb_any(skb);
                return -EFAULT;
        }

        ring->stats.rx_pkts++;
        ring->stats.rx_bytes += skb->len;

        if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
                     hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
                ring->stats.l3l4_csum_err++;
                return 0;
        }

        skb->ip_summed = CHECKSUM_UNNECESSARY;

        return 0;
}

static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
        int i, ret;
        struct hnae_desc_cb res_cbs;
        struct hnae_desc_cb *desc_cb;
        struct hnae_ring *ring = ring_data->ring;
        struct net_device *ndev = ring_data->napi.dev;

        for (i = 0; i < cleand_count; i++) {
                desc_cb = &ring->desc_cb[ring->next_to_use];
                if (desc_cb->reuse_flag) {
                        ring->stats.reuse_pg_cnt++;
                        hnae_reuse_buffer(ring, ring->next_to_use);
                } else {
                        ret = hnae_reserve_buffer_map(ring, &res_cbs);
                        if (ret) {
                                ring->stats.sw_err_cnt++;
                                netdev_err(ndev, "hnae reserve buffer map failed.\n");
                                break;
                        }
                        hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
                }

                ring_ptr_move_fw(ring, next_to_use);
        }

        wmb(); /* make sure all data has been written before the submit */
        writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
                              struct sk_buff *skb)
{
        struct net_device *ndev = ring_data->napi.dev;

        skb->protocol = eth_type_trans(skb, ndev);
        (void)napi_gro_receive(&ring_data->napi, skb);
        ndev->last_rx = jiffies;
}

/* returns a negative errno on error, or the number of packets received */
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
                               int budget, void *v)
{
        struct hnae_ring *ring = ring_data->ring;
        struct sk_buff *skb;
        int num, bnum, ex_num;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
        int recv_pkts, recv_bds, clean_count, err;

        num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
        rmb(); /* make sure num taken effect before the other data is touched */

        recv_pkts = 0, recv_bds = 0, clean_count = 0;
recv:
        while (recv_pkts < budget && recv_bds < num) {
                /* reuse or realloc buffers */
                if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
                        hns_nic_alloc_rx_buffers(ring_data, clean_count);
                        clean_count = 0;
                }

                /* poll one packet */
                err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
                if (unlikely(!skb)) /* this fault cannot be repaired */
                        break;

                recv_bds += bnum;
                clean_count += bnum;
                if (unlikely(err)) { /* skip the erroneous packet */
                        recv_pkts++;
                        continue;
                }

                /* hand the packet up to the IP stack */
                ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
                        ring_data, skb);
                recv_pkts++;
        }

        /* re-check whether more descriptors arrived while polling */
        if (recv_pkts < budget) {
                ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
                rmb(); /* complete reading the rx ring bd number */
                if (ex_num > clean_count) {
                        num += ex_num - clean_count;
                        goto recv;
                }
        }

        /* make sure all data has been written before the submit */
        if (clean_count > 0)
                hns_nic_alloc_rx_buffers(ring_data, clean_count);

        return recv_pkts;
}

static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
        struct hnae_ring *ring = ring_data->ring;
        int num = 0;

        /* re-check the ring; workaround for a hardware bug */
        num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

        if (num > 0) {
                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
                        ring_data->ring, 1);

                napi_schedule(&ring_data->napi);
        }
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
                                            int *bytes, int *pkts)
{
        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

        (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
        (*bytes) += desc_cb->length;
        /* desc_cb will be cleaned after hnae_free_buffer_detach */
        hnae_free_buffer_detach(ring, ring->next_to_clean);

        ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
        int u = ring->next_to_use;
        int c = ring->next_to_clean;

        if (unlikely(h > ring->desc_num))
                return 0;

        assert(u > 0 && u < ring->desc_num);
        assert(c > 0 && c < ring->desc_num);
        assert(u != c && h != c); /* must be checked before calling this func */

        return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

/* netif_tx_lock hurts performance; take it only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#endif
/* reclaim all descriptors in one budget
 * return error or number of descriptors left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
                               int budget, void *v)
{
        struct hnae_ring *ring = ring_data->ring;
        struct net_device *ndev = ring_data->napi.dev;
        struct netdev_queue *dev_queue;
        struct hns_nic_priv *priv = netdev_priv(ndev);
        int head;
        int bytes, pkts;

        NETIF_TX_LOCK(ndev);

        head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
        rmb(); /* make sure head is ready before touching any data */

        if (is_ring_empty(ring) || head == ring->next_to_clean) {
                NETIF_TX_UNLOCK(ndev);
                return 0; /* no data to poll */
        }

        if (!is_valid_clean_head(ring, head)) {
                netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
                           ring->next_to_use, ring->next_to_clean);
                ring->stats.io_err_cnt++;
                NETIF_TX_UNLOCK(ndev);
                return -EIO;
        }

        bytes = 0;
        pkts = 0;
        while (head != ring->next_to_clean)
                hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

        NETIF_TX_UNLOCK(ndev);

        dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
        netdev_tx_completed_queue(dev_queue, pkts, bytes);

        if (unlikely(priv->link && !netif_carrier_ok(ndev)))
                netif_carrier_on(ndev);

        if (unlikely(pkts && netif_carrier_ok(ndev) &&
                     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (netif_tx_queue_stopped(dev_queue) &&
                    !test_bit(NIC_STATE_DOWN, &priv->state)) {
                        netif_tx_wake_queue(dev_queue);
                        ring->stats.restart_queue++;
                }
        }
        return 0;
}

static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
        struct hnae_ring *ring = ring_data->ring;
        int head = ring->next_to_clean;

        /* re-check the head; workaround for a hardware bug */
        head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

        if (head != ring->next_to_clean) {
                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
                        ring_data->ring, 1);

                napi_schedule(&ring_data->napi);
        }
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
        struct hnae_ring *ring = ring_data->ring;
        struct net_device *ndev = ring_data->napi.dev;
        struct netdev_queue *dev_queue;
        int head;
        int bytes, pkts;

        NETIF_TX_LOCK(ndev);

        head = ring->next_to_use; /* ntu: the software-set ring position */
        bytes = 0;
        pkts = 0;
        while (head != ring->next_to_clean)
                hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

        NETIF_TX_UNLOCK(ndev);

        dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
        netdev_tx_reset_queue(dev_queue);
}

static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
        struct hns_nic_ring_data *ring_data =
                container_of(napi, struct hns_nic_ring_data, napi);
        int clean_complete = ring_data->poll_one(
                ring_data, budget, ring_data->ex_process);

        if (clean_complete >= 0 && clean_complete < budget) {
                napi_complete(napi);
                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
                        ring_data->ring, 0);

                ring_data->fini_process(ring_data);
        }

        return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
        struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

        ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
                ring_data->ring, 1);
        napi_schedule(&ring_data->napi);

        return IRQ_HANDLED;
}

/**
 * hns_nic_adjust_link - adjust the link mode by the phy state or new parameters
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;

        h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
}

/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct phy_device *phy_dev = NULL;

        if (!h->phy_node)
                return 0;

        if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
                phy_dev = of_phy_connect(ndev, h->phy_node,
                                         hns_nic_adjust_link, 0, h->phy_if);
        else
                phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);

        if (unlikely(!phy_dev) || IS_ERR(phy_dev))
                return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);

        phy_dev->supported &= h->if_support;
        phy_dev->advertising = phy_dev->supported;

        if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
                phy_dev->autoneg = false;

        priv->phy = phy_dev;

        return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct hnae_handle *h = priv->ae_handle;

        napi_enable(&priv->ring_data[idx].napi);

        enable_irq(priv->ring_data[idx].ring->irq);
        h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

        return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        struct sockaddr *mac_addr = p;
        int ret;

        if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
                return -EADDRNOTAVAIL;

        ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
        if (ret) {
                netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
                return ret;
        }

        memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

        return 0;
}

void hns_nic_update_stats(struct net_device *netdev)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct hnae_handle *h = priv->ae_handle;

        h->dev->ops->update_stats(h, &netdev->stats);
}

/* set the mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct device_node *node = priv->dev->of_node;
        const void *mac_addr_temp;

        mac_addr_temp = of_get_mac_address(node);
        if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
                memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
        } else {
                eth_hw_addr_random(ndev);
                dev_warn(priv->dev, "No valid mac, use random mac %pM",
                         ndev->dev_addr);
        }
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct hnae_handle *h = priv->ae_handle;

        h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
        disable_irq(priv->ring_data[idx].ring->irq);

        napi_disable(&priv->ring_data[idx].napi);
}

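/* hns_set_irq_affinity - pin each ring's irq to a cpu: when q_num equals the
 * number of possible cpus, each ring goes to the cpu matching its queue
 * index; otherwise tx ring irqs land on even cpus and rx ring irqs on odd
 * ones.
 */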
static void hns_set_irq_affinity(struct hns_nic_priv *priv)
{
        struct hnae_handle *h = priv->ae_handle;
        struct hns_nic_ring_data *rd;
        int i;
        int cpu;
        cpumask_t mask;

        /* different irq balancing for 16-core and 32-core machines */
        if (h->q_num == num_possible_cpus()) {
                for (i = 0; i < h->q_num * 2; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index)) {
                                cpumask_clear(&mask);
                                cpu = rd->queue_index;
                                cpumask_set_cpu(cpu, &mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
                                                            &mask);
                        }
                }
        } else {
                for (i = 0; i < h->q_num; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index * 2)) {
                                cpumask_clear(&mask);
                                cpu = rd->queue_index * 2;
                                cpumask_set_cpu(cpu, &mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
                                                            &mask);
                        }
                }

                for (i = h->q_num; i < h->q_num * 2; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index * 2 + 1)) {
                                cpumask_clear(&mask);
                                cpu = rd->queue_index * 2 + 1;
                                cpumask_set_cpu(cpu, &mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
                                                            &mask);
                        }
                }
        }
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
        struct hnae_handle *h = priv->ae_handle;
        struct hns_nic_ring_data *rd;
        int i;
        int ret;

        for (i = 0; i < h->q_num * 2; i++) {
                rd = &priv->ring_data[i];

                if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
                        break;

                snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
                         "%s-%s%d", priv->netdev->name,
                         (i < h->q_num ? "tx" : "rx"), rd->queue_index);

                rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

                ret = request_irq(rd->ring->irq,
                                  hns_irq_handle, 0, rd->ring->ring_name, rd);
                if (ret) {
                        netdev_err(priv->netdev, "request irq(%d) fail\n",
                                   rd->ring->irq);
                        return ret;
                }
                disable_irq(rd->ring->irq);
                rd->ring->irq_init_flag = RCB_IRQ_INITED;
        }

        /* set cpu affinity */
        hns_set_irq_affinity(priv);

        return 0;
}

static int hns_nic_net_up(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        int i, j, k;
        int ret;

        ret = hns_nic_init_irq(priv);
        if (ret != 0) {
                netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
                return ret;
        }

        for (i = 0; i < h->q_num * 2; i++) {
                ret = hns_nic_ring_open(ndev, i);
                if (ret)
                        goto out_has_some_queues;
        }

        for (k = 0; k < h->q_num; k++)
                h->dev->ops->toggle_queue_status(h->qs[k], 1);

        ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
        if (ret)
                goto out_set_mac_addr_err;

        ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
        if (ret)
                goto out_start_err;

        if (priv->phy)
                phy_start(priv->phy);

        clear_bit(NIC_STATE_DOWN, &priv->state);
        (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

        return 0;

out_start_err:
        netif_stop_queue(ndev);
out_set_mac_addr_err:
        for (k = 0; k < h->q_num; k++)
                h->dev->ops->toggle_queue_status(h->qs[k], 0);
out_has_some_queues:
        for (j = i - 1; j >= 0; j--)
                hns_nic_ring_close(ndev, j);

        set_bit(NIC_STATE_DOWN, &priv->state);

        return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
        int i;
        struct hnae_ae_ops *ops;
        struct hns_nic_priv *priv = netdev_priv(ndev);

        if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
                return;

        (void)del_timer_sync(&priv->service_timer);
        netif_tx_stop_all_queues(ndev);
        netif_carrier_off(ndev);
        netif_tx_disable(ndev);
        priv->link = 0;

        if (priv->phy)
                phy_stop(priv->phy);

        ops = priv->ae_handle->dev->ops;

        if (ops->stop)
                ops->stop(priv->ae_handle);

        netif_tx_stop_all_queues(ndev);

        for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
                hns_nic_ring_close(ndev, i);
                hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

                /* clean tx buffers */
                hns_nic_tx_clr_all_bufs(priv->ring_data + i);
        }
}

void hns_nic_net_reset(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *handle = priv->ae_handle;

        while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
                usleep_range(1000, 2000);

        (void)hnae_reinit_handle(handle);

        clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);

        priv->netdev->trans_start = jiffies;
        while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
                usleep_range(1000, 2000);

        hns_nic_net_down(netdev);
        hns_nic_net_reset(netdev);
        (void)hns_nic_net_up(netdev);
        clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        int ret;

        if (test_bit(NIC_STATE_TESTING, &priv->state))
                return -EBUSY;

        priv->link = 0;
        netif_carrier_off(ndev);

        ret = netif_set_real_num_tx_queues(ndev, h->q_num);
        if (ret < 0) {
                netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
                           ret);
                return ret;
        }

        ret = netif_set_real_num_rx_queues(ndev, h->q_num);
        if (ret < 0) {
                netdev_err(ndev,
                           "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
                return ret;
        }

        ret = hns_nic_net_up(ndev);
        if (ret) {
                netdev_err(ndev,
                           "hns net up fail, ret=%d!\n", ret);
                return ret;
        }

        return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
        hns_nic_net_down(ndev);

        return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
static void hns_nic_net_timeout(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);

        hns_tx_timeout_reset(priv);
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
                            int cmd)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct phy_device *phy_dev = priv->phy;

        if (!netif_running(netdev))
                return -EINVAL;

        if (!phy_dev)
                return -ENOTSUPP;

        return phy_mii_ioctl(phy_dev, ifr, cmd);
}

/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        unsigned long flags;
        int i;

        local_irq_save(flags);
        for (i = 0; i < priv->ae_handle->q_num * 2; i++)
                napi_schedule(&priv->ring_data[i].napi);
        local_irq_restore(flags);
}
#endif

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
                                    struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        int ret;

        assert(skb->queue_mapping < ndev->ae_handle->q_num);
        ret = hns_nic_net_xmit_hw(ndev, skb,
                                  &tx_ring_data(priv, skb->queue_mapping));
        if (ret == NETDEV_TX_OK) {
                ndev->trans_start = jiffies;
                ndev->stats.tx_bytes += skb->len;
                ndev->stats.tx_packets++;
        }
        return (netdev_tx_t)ret;
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        int ret;

        /* MTU < 68 is an error and causes problems on some kernels */
        if (new_mtu < 68)
                return -EINVAL;

        if (!h->dev->ops->set_mtu)
                return -ENOTSUPP;

        if (netif_running(ndev)) {
                (void)hns_nic_net_stop(ndev);
                msleep(100);

                ret = h->dev->ops->set_mtu(h, new_mtu);
                if (ret)
                        netdev_err(ndev, "set mtu fail, return value %d\n",
                                   ret);

                if (hns_nic_net_open(ndev))
                        netdev_err(ndev, "hns net open fail\n");
        } else {
                ret = h->dev->ops->set_mtu(h, new_mtu);
        }

        if (!ret)
                ndev->mtu = new_mtu;

        return ret;
}

/**
 * hns_set_multicast_list - set the multicast mac addresses
 * @ndev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        struct netdev_hw_addr *ha = NULL;

        if (!h) {
                netdev_err(ndev, "hnae handle is null\n");
                return;
        }

        if (h->dev->ops->set_mc_addr) {
                netdev_for_each_mc_addr(ha, ndev)
                        if (h->dev->ops->set_mc_addr(h, ha->addr))
                                netdev_err(ndev, "set multicast fail\n");
        }
}

void hns_nic_set_rx_mode(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;

        if (h->dev->ops->set_promisc_mode) {
                if (ndev->flags & IFF_PROMISC)
                        h->dev->ops->set_promisc_mode(h, 1);
                else
                        h->dev->ops->set_promisc_mode(h, 0);
        }

        hns_set_multicast_list(ndev);
}

struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
                                              struct rtnl_link_stats64 *stats)
{
        int idx = 0;
        u64 tx_bytes = 0;
        u64 rx_bytes = 0;
        u64 tx_pkts = 0;
        u64 rx_pkts = 0;
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;

        for (idx = 0; idx < h->q_num; idx++) {
                tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
                tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
                rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
                rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
        }

        stats->tx_bytes = tx_bytes;
        stats->tx_packets = tx_pkts;
        stats->rx_bytes = rx_bytes;
        stats->rx_packets = rx_pkts;

        stats->rx_errors = ndev->stats.rx_errors;
        stats->multicast = ndev->stats.multicast;
        stats->rx_length_errors = ndev->stats.rx_length_errors;
        stats->rx_crc_errors = ndev->stats.rx_crc_errors;
        stats->rx_missed_errors = ndev->stats.rx_missed_errors;

        stats->tx_errors = ndev->stats.tx_errors;
        stats->rx_dropped = ndev->stats.rx_dropped;
        stats->tx_dropped = ndev->stats.tx_dropped;
        stats->collisions = ndev->stats.collisions;
        stats->rx_over_errors = ndev->stats.rx_over_errors;
        stats->rx_frame_errors = ndev->stats.rx_frame_errors;
        stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
        stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
        stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
        stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
        stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
        stats->tx_window_errors = ndev->stats.tx_window_errors;
        stats->rx_compressed = ndev->stats.rx_compressed;
        stats->tx_compressed = ndev->stats.tx_compressed;

        return stats;
}

static const struct net_device_ops hns_nic_netdev_ops = {
        .ndo_open = hns_nic_net_open,
        .ndo_stop = hns_nic_net_stop,
        .ndo_start_xmit = hns_nic_net_xmit,
        .ndo_tx_timeout = hns_nic_net_timeout,
        .ndo_set_mac_address = hns_nic_net_set_mac_address,
        .ndo_change_mtu = hns_nic_change_mtu,
        .ndo_do_ioctl = hns_nic_do_ioctl,
        .ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = hns_nic_poll_controller,
#endif
        .ndo_set_rx_mode = hns_nic_set_rx_mode,
};

static void hns_nic_update_link_status(struct net_device *netdev)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);

        struct hnae_handle *h = priv->ae_handle;
        int state = 1;

        if (priv->phy) {
                if (!genphy_update_link(priv->phy))
                        state = priv->phy->link;
                else
                        state = 0;
        }
        state = state && h->dev->ops->get_status(h);

        if (state != priv->link) {
                if (state) {
                        netif_carrier_on(netdev);
                        netif_tx_wake_all_queues(netdev);
                        netdev_info(netdev, "link up\n");
                } else {
                        netif_carrier_off(netdev);
                        netdev_info(netdev, "link down\n");
                }
                priv->link = state;
        }
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
        struct hnae_handle *h = priv->ae_handle;
        struct hnae_ae_ops *ops = h->dev->ops;
        u32 *data, reg_num, i;

        if (ops->get_regs_len && ops->get_regs) {
                reg_num = ops->get_regs_len(priv->ae_handle);
                reg_num = (reg_num + 3ul) & ~3ul;
                data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
                if (data) {
                        ops->get_regs(priv->ae_handle, data);
                        for (i = 0; i < reg_num; i += 4)
                                pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                        i, data[i], data[i + 1],
                                        data[i + 2], data[i + 3]);
                        kfree(data);
                }
        }

        for (i = 0; i < h->q_num; i++) {
                pr_info("tx_queue%d_next_to_clean:%d\n",
                        i, h->qs[i]->tx_ring.next_to_clean);
                pr_info("tx_queue%d_next_to_use:%d\n",
                        i, h->qs[i]->tx_ring.next_to_use);
                pr_info("rx_queue%d_next_to_clean:%d\n",
                        i, h->qs[i]->rx_ring.next_to_clean);
                pr_info("rx_queue%d_next_to_use:%d\n",
                        i, h->qs[i]->rx_ring.next_to_use);
        }
}

/* the reset subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
        enum hnae_port_type type = priv->ae_handle->port_type;

        if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
                return;
        clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

        /* If we're already down, removing or resetting, just bail */
        if (test_bit(NIC_STATE_DOWN, &priv->state) ||
            test_bit(NIC_STATE_REMOVING, &priv->state) ||
            test_bit(NIC_STATE_RESETTING, &priv->state))
                return;

        hns_nic_dump(priv);
        netdev_info(priv->netdev, "try to reset %s port!\n",
                    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

        rtnl_lock();
        /* put off any impending NetWatchDogTimeout */
        priv->netdev->trans_start = jiffies;

        if (type == HNAE_PORT_DEBUG) {
                hns_nic_net_reinit(priv->netdev);
        } else {
                netif_carrier_off(priv->netdev);
                netif_tx_disable(priv->netdev);
        }
        rtnl_unlock();
}

/* mark the service event complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
        WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));

        smp_mb__before_atomic();
        clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}

static void hns_nic_service_task(struct work_struct *work)
{
        struct hns_nic_priv *priv
                = container_of(work, struct hns_nic_priv, service_task);
        struct hnae_handle *h = priv->ae_handle;

        hns_nic_update_link_status(priv->netdev);
        h->dev->ops->update_led_status(h);
        hns_nic_update_stats(priv->netdev);

        hns_nic_reset_subtask(priv);
        hns_nic_service_event_complete(priv);
}
1604 | ||
1605 | static void hns_nic_task_schedule(struct hns_nic_priv *priv) | |
1606 | { | |
1607 | if (!test_bit(NIC_STATE_DOWN, &priv->state) && | |
1608 | !test_bit(NIC_STATE_REMOVING, &priv->state) && | |
1609 | !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state)) | |
1610 | (void)schedule_work(&priv->service_task); | |
1611 | } | |
1612 | ||
1613 | static void hns_nic_service_timer(unsigned long data) | |
1614 | { | |
1615 | struct hns_nic_priv *priv = (struct hns_nic_priv *)data; | |
1616 | ||
1617 | (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ); | |
1618 | ||
1619 | hns_nic_task_schedule(priv); | |
1620 | } | |
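/*
 * Editor's note: a classic self-rearming timer feeding a workqueue.
 * The timer callback runs in softirq context, so it only re-arms
 * itself and schedules the work; anything that may sleep happens in
 * hns_nic_service_task() in process context. A minimal sketch of the
 * same pattern (hypothetical names, pre-timer_setup() API as used by
 * this driver):
 *
 *	static void my_timer_fn(unsigned long data)
 *	{
 *		struct my_priv *p = (struct my_priv *)data;
 *
 *		mod_timer(&p->timer, jiffies + HZ);	/* re-arm */
 *		schedule_work(&p->work);		/* defer to process context */
 *	}
 */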
1621 | ||
1622 | /** | |
1623 | * hns_tx_timeout_reset - initiate reset due to Tx timeout | |
1624 | * @priv: driver private struct | |
1625 | **/ | |
1626 | static void hns_tx_timeout_reset(struct hns_nic_priv *priv) | |
1627 | { | |
1628 | /* Do the reset outside of interrupt context */ | |
1629 | if (!test_bit(NIC_STATE_DOWN, &priv->state)) { | |
1630 | set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state); | |
1631 | netdev_warn(priv->netdev, | |
1632 | "initiating reset due to tx timeout(%llu,0x%lx)\n", | |
1633 | priv->tx_timeout_count, priv->state); | |
1634 | priv->tx_timeout_count++; | |
1635 | hns_nic_task_schedule(priv); | |
1636 | } | |
1637 | } | |
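/*
 * Editor's note: this is the flag-and-defer half of the TX watchdog
 * path: it only records the request (NIC_STATE2_RESET_REQUESTED) and
 * kicks the service task; the reset itself runs later in
 * hns_nic_reset_subtask() from process context, which is why nothing
 * heavyweight happens here.
 */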
1638 | ||
1639 | static int hns_nic_init_ring_data(struct hns_nic_priv *priv) | |
1640 | { | |
1641 | struct hnae_handle *h = priv->ae_handle; | |
1642 | struct hns_nic_ring_data *rd; | |
1643 | int i; | |
1644 | ||
1645 | if (h->q_num > NIC_MAX_Q_PER_VF) { | |
1646 | netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num); | |
1647 | return -EINVAL; | |
1648 | } | |
1649 | ||
1650 | priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2, | |
1651 | GFP_KERNEL); | |
1652 | if (!priv->ring_data) | |
1653 | return -ENOMEM; | |
1654 | ||
1655 | for (i = 0; i < h->q_num; i++) { | |
1656 | rd = &priv->ring_data[i]; | |
1657 | rd->queue_index = i; | |
1658 | rd->ring = &h->qs[i]->tx_ring; | |
1659 | rd->poll_one = hns_nic_tx_poll_one; | |
1660 | rd->fini_process = hns_nic_tx_fini_pro; | |
1661 | ||
1662 | netif_napi_add(priv->netdev, &rd->napi, | |
1663 | hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); | |
1664 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; | |
1665 | } | |
1666 | for (i = h->q_num; i < h->q_num * 2; i++) { | |
1667 | rd = &priv->ring_data[i]; | |
1668 | rd->queue_index = i - h->q_num; | |
1669 | rd->ring = &h->qs[i - h->q_num]->rx_ring; | |
1670 | rd->poll_one = hns_nic_rx_poll_one; | |
1671 | rd->ex_process = hns_nic_rx_up_pro; | |
1672 | rd->fini_process = hns_nic_rx_fini_pro; | |
1673 | ||
1674 | netif_napi_add(priv->netdev, &rd->napi, | |
1675 | hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); | |
1676 | rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; | |
1677 | } | |
1678 | ||
1679 | return 0; | |
1680 | } | |
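/*
 * Editor's note: ring_data is one flat array of 2 * q_num entries:
 * indices [0, q_num) wrap the TX rings and [q_num, 2 * q_num) the RX
 * rings, each with its own NAPI context. For a queue index q this
 * means (illustrative only):
 *
 *	struct hns_nic_ring_data *tx_rd = &priv->ring_data[q];
 *	struct hns_nic_ring_data *rx_rd = &priv->ring_data[q + h->q_num];
 */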
1681 | ||
1682 | static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv) | |
1683 | { | |
1684 | struct hnae_handle *h = priv->ae_handle; | |
1685 | int i; | |
1686 | ||
1687 | for (i = 0; i < h->q_num * 2; i++) { | |
1688 | netif_napi_del(&priv->ring_data[i].napi); | |
1689 | if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) { | |
13ac695e S |
1690 | (void)irq_set_affinity_hint( |
1691 | priv->ring_data[i].ring->irq, | |
1692 | NULL); | |
b5996f11 | 1693 | free_irq(priv->ring_data[i].ring->irq, |
1694 | &priv->ring_data[i]); | |
1695 | } | |
1696 | ||
1697 | priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED; | |
1698 | } | |
1699 | kfree(priv->ring_data); | |
1700 | } | |
1701 | ||
13ac695e S |
1702 | static void hns_nic_set_priv_ops(struct net_device *netdev) |
1703 | { | |
1704 | struct hns_nic_priv *priv = netdev_priv(netdev); | |
64353af6 | 1705 | struct hnae_handle *h = priv->ae_handle; |
13ac695e S |
1706 | |
1707 | if (AE_IS_VER1(priv->enet_ver)) { | |
1708 | priv->ops.fill_desc = fill_desc; | |
1709 | priv->ops.get_rxd_bnum = get_rx_desc_bnum; | |
1710 | priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; | |
1711 | } else { | |
1712 | priv->ops.get_rxd_bnum = get_v2rx_desc_bnum; | |
64353af6 S |
1713 | if ((netdev->features & NETIF_F_TSO) || |
1714 | (netdev->features & NETIF_F_TSO6)) { | |
1715 | priv->ops.fill_desc = fill_tso_desc; | |
1716 | priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; | |
1717 | /* this chip only supports a GSO size of 7 * 4096 bytes */ | |
1718 | netif_set_gso_max_size(netdev, 7 * 4096); | |
1719 | h->dev->ops->set_tso_stats(h, 1); | |
1720 | } else { | |
1721 | priv->ops.fill_desc = fill_v2_desc; | |
1722 | priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; | |
1723 | } | |
13ac695e S |
1724 | } |
1725 | } | |
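/*
 * Editor's note: descriptor handling is chosen once at probe time and
 * dispatched indirectly from the hot path, so the per-packet code
 * never re-tests the chip version or the TSO features. A hypothetical
 * call site would look like:
 *
 *	priv->ops.fill_desc(ring, skb, size, dma, frag_end,
 *			    buf_num, DESC_TYPE_SKB, ndev->mtu);
 */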
1726 | ||
b5996f11 | 1727 | static int hns_nic_try_get_ae(struct net_device *ndev) |
1728 | { | |
1729 | struct hns_nic_priv *priv = netdev_priv(ndev); | |
1730 | struct hnae_handle *h; | |
1731 | int ret; | |
1732 | ||
1733 | h = hnae_get_handle(&priv->netdev->dev, | |
1734 | priv->ae_name, priv->port_id, NULL); | |
1735 | if (IS_ERR_OR_NULL(h)) { | |
1736 | ret = h ? PTR_ERR(h) : -ENODEV; /* PTR_ERR(NULL) would be 0 */ | |
1737 | dev_dbg(priv->dev, "no handle yet, register notifier!\n"); | |
1738 | goto out; | |
1739 | } | |
1740 | priv->ae_handle = h; | |
1741 | ||
1742 | ret = hns_nic_init_phy(ndev, h); | |
1743 | if (ret) { | |
1744 | dev_err(priv->dev, "probe PHY device failed!\n"); | |
1745 | goto out_init_phy; | |
1746 | } | |
1747 | ||
1748 | ret = hns_nic_init_ring_data(priv); | |
1749 | if (ret) { | |
1750 | ret = -ENOMEM; | |
1751 | goto out_init_ring_data; | |
1752 | } | |
1753 | ||
13ac695e S |
1754 | hns_nic_set_priv_ops(ndev); |
1755 | ||
b5996f11 | 1756 | ret = register_netdev(ndev); |
1757 | if (ret) { | |
1758 | dev_err(priv->dev, "register netdev failed!\n"); | |
1759 | goto out_reg_ndev_fail; | |
1760 | } | |
1761 | return 0; | |
1762 | ||
1763 | out_reg_ndev_fail: | |
1764 | hns_nic_uninit_ring_data(priv); | |
1765 | priv->ring_data = NULL; | |
1766 | out_init_phy: | |
1767 | out_init_ring_data: | |
1768 | hnae_put_handle(priv->ae_handle); | |
1769 | priv->ae_handle = NULL; | |
1770 | out: | |
1771 | return ret; | |
1772 | } | |
1773 | ||
1774 | static int hns_nic_notifier_action(struct notifier_block *nb, | |
1775 | unsigned long action, void *data) | |
1776 | { | |
1777 | struct hns_nic_priv *priv = | |
1778 | container_of(nb, struct hns_nic_priv, notifier_block); | |
1779 | ||
1780 | assert(action == HNAE_AE_REGISTER); | |
1781 | ||
1782 | if (!hns_nic_try_get_ae(priv->netdev)) { | |
1783 | hnae_unregister_notifier(&priv->notifier_block); | |
1784 | priv->notifier_block.notifier_call = NULL; | |
1785 | } | |
1786 | return 0; | |
1787 | } | |
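/*
 * Editor's note: probe ordering against the AE layer is handled with
 * this notifier rather than -EPROBE_DEFER: if hnae_get_handle() has
 * nothing to hand out yet, hns_nic_dev_probe() registers the
 * callback, and the netdev is brought up from here once the AE
 * registers. On success the notifier unregisters itself.
 */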
1788 | ||
1789 | static int hns_nic_dev_probe(struct platform_device *pdev) | |
1790 | { | |
1791 | struct device *dev = &pdev->dev; | |
1792 | struct net_device *ndev; | |
1793 | struct hns_nic_priv *priv; | |
1794 | struct device_node *node = dev->of_node; | |
1795 | int ret; | |
1796 | ||
1797 | ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF); | |
1798 | if (!ndev) | |
1799 | return -ENOMEM; | |
1800 | ||
1801 | platform_set_drvdata(pdev, ndev); | |
1802 | ||
1803 | priv = netdev_priv(ndev); | |
1804 | priv->dev = dev; | |
1805 | priv->netdev = ndev; | |
1806 | ||
13ac695e | 1807 | if (of_device_is_compatible(node, "hisilicon,hns-nic-v1")) |
b5996f11 | 1808 | priv->enet_ver = AE_VERSION_1; |
13ac695e S |
1809 | else |
1810 | priv->enet_ver = AE_VERSION_2; | |
b5996f11 | 1811 | |
1812 | ret = of_property_read_string(node, "ae-name", &priv->ae_name); | |
1813 | if (ret) | |
1814 | goto out_read_string_fail; | |
1815 | ||
1816 | ret = of_property_read_u32(node, "port-id", &priv->port_id); | |
1817 | if (ret) | |
1818 | goto out_read_string_fail; | |
1819 | ||
1820 | hns_init_mac_addr(ndev); | |
1821 | ||
1822 | ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT; | |
1823 | ndev->priv_flags |= IFF_UNICAST_FLT; | |
1824 | ndev->netdev_ops = &hns_nic_netdev_ops; | |
1825 | hns_ethtool_set_ops(ndev); | |
13ac695e | 1826 | |
b5996f11 | 1827 | ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
1828 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | | |
1829 | NETIF_F_GRO; | |
1830 | ndev->vlan_features |= | |
1831 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; | |
1832 | ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; | |
1833 | ||
13ac695e S |
1834 | switch (priv->enet_ver) { |
1835 | case AE_VERSION_2: | |
64353af6 | 1836 | ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; |
13ac695e S |
1837 | ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
1838 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | | |
64353af6 | 1839 | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6; |
13ac695e S |
1840 | break; |
1841 | default: | |
1842 | break; | |
1843 | } | |
1844 | ||
b5996f11 | 1845 | SET_NETDEV_DEV(ndev, dev); |
1846 | ||
1847 | if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) | |
1848 | dev_dbg(dev, "set mask to 64bit\n"); | |
1849 | else | |
1850 | dev_err(dev, "failed to set 64bit DMA mask!\n"); | |
1851 | ||
1852 | /* carrier off reporting is important to ethtool even BEFORE open */ | |
1853 | netif_carrier_off(ndev); | |
1854 | ||
1855 | setup_timer(&priv->service_timer, hns_nic_service_timer, | |
1856 | (unsigned long)priv); | |
1857 | INIT_WORK(&priv->service_task, hns_nic_service_task); | |
1858 | ||
1859 | set_bit(NIC_STATE_SERVICE_INITED, &priv->state); | |
1860 | clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state); | |
1861 | set_bit(NIC_STATE_DOWN, &priv->state); | |
1862 | ||
1863 | if (hns_nic_try_get_ae(priv->netdev)) { | |
1864 | priv->notifier_block.notifier_call = hns_nic_notifier_action; | |
1865 | ret = hnae_register_notifier(&priv->notifier_block); | |
1866 | if (ret) { | |
1867 | dev_err(dev, "register notifier fail!\n"); | |
1868 | goto out_notify_fail; | |
1869 | } | |
1870 | dev_dbg(dev, "no handle yet, notifier registered\n"); | |
1871 | } | |
1872 | ||
1873 | return 0; | |
1874 | ||
1875 | out_notify_fail: | |
1876 | (void)cancel_work_sync(&priv->service_task); | |
1877 | out_read_string_fail: | |
1878 | free_netdev(ndev); | |
1879 | return ret; | |
1880 | } | |
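/*
 * Editor's note: everything the probe needs comes from the device
 * tree: the compatible string selects AE_VERSION_1 vs AE_VERSION_2,
 * and "ae-name"/"port-id" identify the backing AE port. A minimal
 * node might look like this (illustrative sketch; node name and
 * ae-name value are board-specific assumptions):
 *
 *	ethernet@0 {
 *		compatible = "hisilicon,hns-nic-v2";
 *		ae-name = "dsaf0";
 *		port-id = <0>;
 *	};
 */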
1881 | ||
1882 | static int hns_nic_dev_remove(struct platform_device *pdev) | |
1883 | { | |
1884 | struct net_device *ndev = platform_get_drvdata(pdev); | |
1885 | struct hns_nic_priv *priv = netdev_priv(ndev); | |
1886 | ||
1887 | if (ndev->reg_state != NETREG_UNINITIALIZED) | |
1888 | unregister_netdev(ndev); | |
1889 | ||
1890 | if (priv->ring_data) | |
1891 | hns_nic_uninit_ring_data(priv); | |
1892 | priv->ring_data = NULL; | |
1893 | ||
1894 | if (priv->phy) | |
1895 | phy_disconnect(priv->phy); | |
1896 | priv->phy = NULL; | |
1897 | ||
1898 | if (!IS_ERR_OR_NULL(priv->ae_handle)) | |
1899 | hnae_put_handle(priv->ae_handle); | |
1900 | priv->ae_handle = NULL; | |
1901 | if (priv->notifier_block.notifier_call) | |
1902 | hnae_unregister_notifier(&priv->notifier_block); | |
1903 | priv->notifier_block.notifier_call = NULL; | |
1904 | ||
1905 | set_bit(NIC_STATE_REMOVING, &priv->state); | |
1906 | (void)cancel_work_sync(&priv->service_task); | |
1907 | ||
1908 | free_netdev(ndev); | |
1909 | return 0; | |
1910 | } | |
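/*
 * Editor's note: teardown mirrors probe in reverse: unregister the
 * netdev, free the ring data, detach the PHY, release the AE handle,
 * drop the notifier, then stop the service machinery. Setting
 * NIC_STATE_REMOVING before cancel_work_sync() keeps
 * hns_nic_task_schedule() from re-queueing the work while it is
 * being cancelled.
 */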
1911 | ||
1912 | static const struct of_device_id hns_enet_of_match[] = { | |
1913 | {.compatible = "hisilicon,hns-nic-v1",}, | |
1914 | {.compatible = "hisilicon,hns-nic-v2",}, | |
1915 | {}, | |
1916 | }; | |
1917 | ||
1918 | MODULE_DEVICE_TABLE(of, hns_enet_of_match); | |
1919 | ||
1920 | static struct platform_driver hns_nic_dev_driver = { | |
1921 | .driver = { | |
1922 | .name = "hns-nic", | |
b5996f11 | 1923 | .of_match_table = hns_enet_of_match, |
1924 | }, | |
1925 | .probe = hns_nic_dev_probe, | |
1926 | .remove = hns_nic_dev_remove, | |
1927 | }; | |
1928 | ||
1929 | module_platform_driver(hns_nic_dev_driver); | |
1930 | ||
1931 | MODULE_DESCRIPTION("HISILICON HNS Ethernet driver"); | |
1932 | MODULE_AUTHOR("Hisilicon, Inc."); | |
1933 | MODULE_LICENSE("GPL"); | |
1934 | MODULE_ALIAS("platform:hns-nic"); |