net: hns: delete redundancy ring enable operations
[deliverable/linux.git] / drivers/net/ethernet/hisilicon/hns/hns_enet.c
b5996f11 1/*
2 * Copyright (c) 2014-2015 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/clk.h>
11#include <linux/cpumask.h>
12#include <linux/etherdevice.h>
13#include <linux/if_vlan.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/ip.h>
17#include <linux/ipv6.h>
18#include <linux/module.h>
19#include <linux/phy.h>
20#include <linux/platform_device.h>
21#include <linux/skbuff.h>
22
23#include "hnae.h"
24#include "hns_enet.h"
25
26#define NIC_MAX_Q_PER_VF 16
27#define HNS_NIC_TX_TIMEOUT (5 * HZ)
28
29#define SERVICE_TIMER_HZ (1 * HZ)
30
31#define NIC_TX_CLEAN_MAX_NUM 256
32#define NIC_RX_CLEAN_MAX_NUM 64
33
b5996f11 34#define RCB_IRQ_NOT_INITED 0
35#define RCB_IRQ_INITED 1
9cbe9fd5 36#define HNS_BUFFER_SIZE_2048 2048
b5996f11 37
13ac695e 38#define BD_MAX_SEND_SIZE 8191
39#define SKB_TMP_LEN(SKB) \
40 (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
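/* BD_MAX_SEND_SIZE is the largest payload a single BD can carry (8191 bytes,
 * presumably the maximum of the descriptor's send_size field). SKB_TMP_LEN()
 * is the header length from the MAC header through the end of the TCP header;
 * subtracting it from skb->len yields the TSO payload length.
 */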
41
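/* fill_v2_desc - fill one V2 hardware descriptor (BD) with one buffer.
 * For CHECKSUM_PARTIAL skbs it also programs L3/L4 checksum offload and,
 * for TCP GSO packets, the TSO fields (mss, l4_len, paylen).
 */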
42static void fill_v2_desc(struct hnae_ring *ring, void *priv,
43 int size, dma_addr_t dma, int frag_end,
44 int buf_num, enum hns_desc_type type, int mtu)
45{
46 struct hnae_desc *desc = &ring->desc[ring->next_to_use];
47 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
48 struct iphdr *iphdr;
49 struct ipv6hdr *ipv6hdr;
50 struct sk_buff *skb;
13ac695e 51 __be16 protocol;
52 u8 bn_pid = 0;
53 u8 rrcfv = 0;
54 u8 ip_offset = 0;
55 u8 tvsvsn = 0;
56 u16 mss = 0;
57 u8 l4_len = 0;
58 u16 paylen = 0;
59
60 desc_cb->priv = priv;
61 desc_cb->length = size;
62 desc_cb->dma = dma;
63 desc_cb->type = type;
64
65 desc->addr = cpu_to_le64(dma);
66 desc->tx.send_size = cpu_to_le16((u16)size);
67
f8a1a636 68 /* config bd buffer end */
13ac695e 69 hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
70 hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);
71
f8a1a636 72 /* fill port_id in the tx bd for sending management pkts */
73 hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
74 HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
75
13ac695e 76 if (type == DESC_TYPE_SKB) {
77 skb = (struct sk_buff *)priv;
78
79 if (skb->ip_summed == CHECKSUM_PARTIAL) {
80 skb_reset_mac_len(skb);
81 protocol = skb->protocol;
82 ip_offset = ETH_HLEN;
83
84 if (protocol == htons(ETH_P_8021Q)) {
85 ip_offset += VLAN_HLEN;
86 protocol = vlan_get_protocol(skb);
87 skb->protocol = protocol;
88 }
89
90 if (skb->protocol == htons(ETH_P_IP)) {
91 iphdr = ip_hdr(skb);
92 hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
93 hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
94
95 /* check for tcp/udp header */
0b51b1dc 96 if (iphdr->protocol == IPPROTO_TCP &&
97 skb_is_gso(skb)) {
13ac695e 98 hnae_set_bit(tvsvsn,
99 HNSV2_TXD_TSE_B, 1);
13ac695e 100 l4_len = tcp_hdrlen(skb);
0b51b1dc 101 mss = skb_shinfo(skb)->gso_size;
102 paylen = skb->len - SKB_TMP_LEN(skb);
13ac695e 103 }
104 } else if (skb->protocol == htons(ETH_P_IPV6)) {
105 hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
106 ipv6hdr = ipv6_hdr(skb);
107 hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
108
109 /* check for tcp/udp header */
0b51b1dc 110 if (ipv6hdr->nexthdr == IPPROTO_TCP &&
111 skb_is_gso(skb) && skb_is_gso_v6(skb)) {
13ac695e 112 hnae_set_bit(tvsvsn,
113 HNSV2_TXD_TSE_B, 1);
13ac695e 114 l4_len = tcp_hdrlen(skb);
0b51b1dc 115 mss = skb_shinfo(skb)->gso_size;
116 paylen = skb->len - SKB_TMP_LEN(skb);
13ac695e 117 }
118 }
119 desc->tx.ip_offset = ip_offset;
120 desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
121 desc->tx.mss = cpu_to_le16(mss);
122 desc->tx.l4_len = l4_len;
123 desc->tx.paylen = cpu_to_le16(paylen);
124 }
125 }
126
127 hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);
128
129 desc->tx.bn_pid = bn_pid;
130 desc->tx.ra_ri_cs_fe_vld = rrcfv;
131
132 ring_ptr_move_fw(ring, next_to_use);
133}
134
63434888 135static const struct acpi_device_id hns_enet_acpi_match[] = {
136 { "HISI00C1", 0 },
137 { "HISI00C2", 0 },
138 { },
139};
140MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
141
b5996f11 142static void fill_desc(struct hnae_ring *ring, void *priv,
143 int size, dma_addr_t dma, int frag_end,
13ac695e 144 int buf_num, enum hns_desc_type type, int mtu)
b5996f11 145{
146 struct hnae_desc *desc = &ring->desc[ring->next_to_use];
147 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
148 struct sk_buff *skb;
149 __be16 protocol;
150 u32 ip_offset;
151 u32 asid_bufnum_pid = 0;
152 u32 flag_ipoffset = 0;
153
154 desc_cb->priv = priv;
155 desc_cb->length = size;
156 desc_cb->dma = dma;
157 desc_cb->type = type;
158
159 desc->addr = cpu_to_le64(dma);
160 desc->tx.send_size = cpu_to_le16((u16)size);
161
 162 /* config bd buffer end */
163 flag_ipoffset |= 1 << HNS_TXD_VLD_B;
164
165 asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;
166
167 if (type == DESC_TYPE_SKB) {
168 skb = (struct sk_buff *)priv;
169
170 if (skb->ip_summed == CHECKSUM_PARTIAL) {
171 protocol = skb->protocol;
172 ip_offset = ETH_HLEN;
173
 174 /* if it is a SW VLAN, check the next protocol */
175 if (protocol == htons(ETH_P_8021Q)) {
176 ip_offset += VLAN_HLEN;
177 protocol = vlan_get_protocol(skb);
178 skb->protocol = protocol;
179 }
180
181 if (skb->protocol == htons(ETH_P_IP)) {
182 flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
183 /* check for tcp/udp header */
184 flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
185
186 } else if (skb->protocol == htons(ETH_P_IPV6)) {
 187 /* ipv6 has no l3 checksum; check for L4 header */
188 flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
189 }
190
191 flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
192 }
193 }
194
195 flag_ipoffset |= frag_end << HNS_TXD_FE_B;
196
197 desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
198 desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
199
200 ring_ptr_move_fw(ring, next_to_use);
201}
202
203static void unfill_desc(struct hnae_ring *ring)
204{
205 ring_ptr_move_bw(ring, next_to_use);
206}
207
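/* If the skb needs more BDs than the ring allows per packet, linearize it
 * with skb_copy() so it fits in a single BD; otherwise just verify that the
 * ring has room for all of its fragments.
 */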
13ac695e 208static int hns_nic_maybe_stop_tx(
209 struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
b5996f11 210{
13ac695e 211 struct sk_buff *skb = *out_skb;
212 struct sk_buff *new_skb = NULL;
b5996f11 213 int buf_num;
b5996f11 214
215 /* no. of segments (plus a header) */
216 buf_num = skb_shinfo(skb)->nr_frags + 1;
217
218 if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
13ac695e 219 if (ring_space(ring) < 1)
220 return -EBUSY;
b5996f11 221
222 new_skb = skb_copy(skb, GFP_ATOMIC);
13ac695e 223 if (!new_skb)
224 return -ENOMEM;
b5996f11 225
226 dev_kfree_skb_any(skb);
13ac695e 227 *out_skb = new_skb;
b5996f11 228 buf_num = 1;
b5996f11 229 } else if (buf_num > ring_space(ring)) {
13ac695e 230 return -EBUSY;
231 }
232
233 *bnum = buf_num;
234 return 0;
235}
236
64353af6 237static int hns_nic_maybe_stop_tso(
238 struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
239{
240 int i;
241 int size;
242 int buf_num;
243 int frag_num;
244 struct sk_buff *skb = *out_skb;
245 struct sk_buff *new_skb = NULL;
246 struct skb_frag_struct *frag;
247
248 size = skb_headlen(skb);
249 buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
250
251 frag_num = skb_shinfo(skb)->nr_frags;
252 for (i = 0; i < frag_num; i++) {
253 frag = &skb_shinfo(skb)->frags[i];
254 size = skb_frag_size(frag);
255 buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
256 }
257
258 if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
259 buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
260 if (ring_space(ring) < buf_num)
261 return -EBUSY;
 262 /* manually split the send packet */
263 new_skb = skb_copy(skb, GFP_ATOMIC);
264 if (!new_skb)
265 return -ENOMEM;
266 dev_kfree_skb_any(skb);
267 *out_skb = new_skb;
268
269 } else if (ring_space(ring) < buf_num) {
270 return -EBUSY;
271 }
272
273 *bnum = buf_num;
274 return 0;
275}
276
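/* fill_tso_desc - like fill_v2_desc, but splits any buffer bigger than
 * BD_MAX_SEND_SIZE across several BDs. Only the last BD of the packet gets
 * the frag_end flag, and only the first sub-buffer keeps the SKB type so
 * that the completion path frees the skb exactly once.
 */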
277static void fill_tso_desc(struct hnae_ring *ring, void *priv,
278 int size, dma_addr_t dma, int frag_end,
279 int buf_num, enum hns_desc_type type, int mtu)
280{
281 int frag_buf_num;
282 int sizeoflast;
283 int k;
284
285 frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
286 sizeoflast = size % BD_MAX_SEND_SIZE;
287 sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;
288
 289 /* when the frag size is bigger than the hardware limit, split this frag */
290 for (k = 0; k < frag_buf_num; k++)
291 fill_v2_desc(ring, priv,
292 (k == frag_buf_num - 1) ?
293 sizeoflast : BD_MAX_SEND_SIZE,
294 dma + BD_MAX_SEND_SIZE * k,
295 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
296 buf_num,
297 (type == DESC_TYPE_SKB && !k) ?
298 DESC_TYPE_SKB : DESC_TYPE_PAGE,
299 mtu);
300}
301
13ac695e 302int hns_nic_net_xmit_hw(struct net_device *ndev,
303 struct sk_buff *skb,
304 struct hns_nic_ring_data *ring_data)
305{
306 struct hns_nic_priv *priv = netdev_priv(ndev);
307 struct device *dev = priv->dev;
308 struct hnae_ring *ring = ring_data->ring;
309 struct netdev_queue *dev_queue;
310 struct skb_frag_struct *frag;
311 int buf_num;
312 int seg_num;
313 dma_addr_t dma;
314 int size, next_to_use;
315 int i;
316
317 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
318 case -EBUSY:
b5996f11 319 ring->stats.tx_busy++;
320 goto out_net_tx_busy;
13ac695e 321 case -ENOMEM:
322 ring->stats.sw_err_cnt++;
323 netdev_err(ndev, "no memory to xmit!\n");
324 goto out_err_tx_ok;
325 default:
326 break;
b5996f11 327 }
13ac695e 328
329 /* no. of segments (plus a header) */
330 seg_num = skb_shinfo(skb)->nr_frags + 1;
b5996f11 331 next_to_use = ring->next_to_use;
332
333 /* fill the first part */
334 size = skb_headlen(skb);
335 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
336 if (dma_mapping_error(dev, dma)) {
337 netdev_err(ndev, "TX head DMA map failed\n");
338 ring->stats.sw_err_cnt++;
339 goto out_err_tx_ok;
340 }
13ac695e 341 priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
342 buf_num, DESC_TYPE_SKB, ndev->mtu);
b5996f11 343
344 /* fill the fragments */
13ac695e 345 for (i = 1; i < seg_num; i++) {
b5996f11 346 frag = &skb_shinfo(skb)->frags[i - 1];
347 size = skb_frag_size(frag);
348 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
349 if (dma_mapping_error(dev, dma)) {
350 netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
351 ring->stats.sw_err_cnt++;
352 goto out_map_frag_fail;
353 }
13ac695e 354 priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
355 seg_num - 1 == i ? 1 : 0, buf_num,
356 DESC_TYPE_PAGE, ndev->mtu);
b5996f11 357 }
358
 359 /* done translating all packets */
360 dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
361 netdev_tx_sent_queue(dev_queue, skb->len);
362
363 wmb(); /* commit all data before submit */
364 assert(skb->queue_mapping < priv->ae_handle->q_num);
365 hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
366 ring->stats.tx_pkts++;
367 ring->stats.tx_bytes += skb->len;
368
369 return NETDEV_TX_OK;
370
371out_map_frag_fail:
372
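	/* Walk next_to_use back to where this packet started, unmapping every
	 * BD filled so far; the head was mapped with dma_map_single() and the
	 * frags with skb_frag_dma_map(), so they are unmapped differently.
	 */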
13ac695e 373 while (ring->next_to_use != next_to_use) {
b5996f11 374 unfill_desc(ring);
13ac695e 375 if (ring->next_to_use != next_to_use)
376 dma_unmap_page(dev,
377 ring->desc_cb[ring->next_to_use].dma,
378 ring->desc_cb[ring->next_to_use].length,
379 DMA_TO_DEVICE);
380 else
381 dma_unmap_single(dev,
382 ring->desc_cb[next_to_use].dma,
383 ring->desc_cb[next_to_use].length,
384 DMA_TO_DEVICE);
b5996f11 385 }
386
b5996f11 387out_err_tx_ok:
388
389 dev_kfree_skb_any(skb);
390 return NETDEV_TX_OK;
391
392out_net_tx_busy:
393
394 netif_stop_subqueue(ndev, skb->queue_mapping);
395
396 /* Herbert's original patch had:
397 * smp_mb__after_netif_stop_queue();
398 * but since that doesn't exist yet, just open code it.
399 */
400 smp_mb();
401 return NETDEV_TX_BUSY;
402}
403
404/**
405 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
406 * @data: pointer to the start of the headers
 * @flag: RX descriptor flags, used to pick the L3/L4 protocol paths
 407 * @max_size: total length of section to find headers in
408 *
409 * This function is meant to determine the length of headers that will
410 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
411 * motivation of doing this is to only perform one pull for IPv4 TCP
412 * packets so that we can do basic things like calculating the gso_size
413 * based on the average data per packet.
414 **/
415static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
416 unsigned int max_size)
417{
418 unsigned char *network;
419 u8 hlen;
420
421 /* this should never happen, but better safe than sorry */
422 if (max_size < ETH_HLEN)
423 return max_size;
424
425 /* initialize network frame pointer */
426 network = data;
427
428 /* set first protocol and move network header forward */
429 network += ETH_HLEN;
430
431 /* handle any vlan tag if present */
432 if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
433 == HNS_RX_FLAG_VLAN_PRESENT) {
434 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
435 return max_size;
436
437 network += VLAN_HLEN;
438 }
439
440 /* handle L3 protocols */
441 if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
442 == HNS_RX_FLAG_L3ID_IPV4) {
443 if ((typeof(max_size))(network - data) >
444 (max_size - sizeof(struct iphdr)))
445 return max_size;
446
447 /* access ihl as a u8 to avoid unaligned access on ia64 */
448 hlen = (network[0] & 0x0F) << 2;
449
450 /* verify hlen meets minimum size requirements */
451 if (hlen < sizeof(struct iphdr))
452 return network - data;
453
454 /* record next protocol if header is present */
455 } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
456 == HNS_RX_FLAG_L3ID_IPV6) {
457 if ((typeof(max_size))(network - data) >
458 (max_size - sizeof(struct ipv6hdr)))
459 return max_size;
460
461 /* record next protocol */
462 hlen = sizeof(struct ipv6hdr);
463 } else {
464 return network - data;
465 }
466
467 /* relocate pointer to start of L4 header */
468 network += hlen;
469
470 /* finally sort out TCP/UDP */
471 if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
472 == HNS_RX_FLAG_L4ID_TCP) {
473 if ((typeof(max_size))(network - data) >
474 (max_size - sizeof(struct tcphdr)))
475 return max_size;
476
477 /* access doff as a u8 to avoid unaligned access on ia64 */
478 hlen = (network[12] & 0xF0) >> 2;
479
480 /* verify hlen meets minimum size requirements */
481 if (hlen < sizeof(struct tcphdr))
482 return network - data;
483
484 network += hlen;
485 } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
486 == HNS_RX_FLAG_L4ID_UDP) {
487 if ((typeof(max_size))(network - data) >
488 (max_size - sizeof(struct udphdr)))
489 return max_size;
490
491 network += sizeof(struct udphdr);
492 }
493
494 /* If everything has gone correctly network should be the
495 * data section of the packet and will be the end of the header.
496 * If not then it probably represents the end of the last recognized
497 * header.
498 */
499 if ((typeof(max_size))(network - data) < max_size)
500 return network - data;
501 else
502 return max_size;
503}
504
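/* RX buffer reuse: with sub-8K pages and 2048-byte buffers each page holds
 * two buffers, and page_offset is flipped between the halves with an XOR
 * (reused only while we hold the sole page reference); in every other layout
 * the offset simply advances through the page. Remote-NUMA pages are never
 * reused.
 */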
9cbe9fd5 505static void hns_nic_reuse_page(struct sk_buff *skb, int i,
506 struct hnae_ring *ring, int pull_len,
507 struct hnae_desc_cb *desc_cb)
b5996f11 508{
9cbe9fd5 509 struct hnae_desc *desc;
510 int truesize, size;
511 int last_offset;
be78a690 512 bool twobufs;
513
514 twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
9cbe9fd5 515
516 desc = &ring->desc[ring->next_to_clean];
517 size = le16_to_cpu(desc->rx.size);
518
be78a690 519 if (twobufs) {
9cbe9fd5 520 truesize = hnae_buf_size(ring);
521 } else {
522 truesize = ALIGN(size, L1_CACHE_BYTES);
523 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
524 }
525
9cbe9fd5 526 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
527 size - pull_len, truesize - pull_len);
528
b5996f11 529 /* avoid re-using remote pages; the reuse flag defaults to off */
be78a690 530 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
531 return;
532
533 if (twobufs) {
534 /* if we are only owner of page we can reuse it */
535 if (likely(page_count(desc_cb->priv) == 1)) {
536 /* flip page offset to other buffer */
537 desc_cb->page_offset ^= truesize;
b5996f11 538
b5996f11 539 desc_cb->reuse_flag = 1;
 540 /* bump ref count on page before it is given */
541 get_page(desc_cb->priv);
542 }
be78a690 543 return;
544 }
545
546 /* move offset up to the next cache line */
547 desc_cb->page_offset += truesize;
548
549 if (desc_cb->page_offset <= last_offset) {
550 desc_cb->reuse_flag = 1;
 551 /* bump ref count on page before it is given */
552 get_page(desc_cb->priv);
b5996f11 553 }
554}
555
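/* The V2 hardware stores (buffer count - 1) in the BUFNUM field, hence the
 * +1 below; V1 stores the buffer count directly.
 */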
13ac695e 556static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
557{
558 *out_bnum = hnae_get_field(bnum_flag,
559 HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
560}
561
562static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
563{
564 *out_bnum = hnae_get_field(bnum_flag,
565 HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
566}
567
b5996f11 568static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
569 struct sk_buff **out_skb, int *out_bnum)
570{
571 struct hnae_ring *ring = ring_data->ring;
572 struct net_device *ndev = ring_data->napi.dev;
13ac695e 573 struct hns_nic_priv *priv = netdev_priv(ndev);
b5996f11 574 struct sk_buff *skb;
575 struct hnae_desc *desc;
576 struct hnae_desc_cb *desc_cb;
0d6b425a 577 struct ethhdr *eh;
b5996f11 578 unsigned char *va;
9cbe9fd5 579 int bnum, length, i;
b5996f11 580 int pull_len;
581 u32 bnum_flag;
582
b5996f11 583 desc = &ring->desc[ring->next_to_clean];
584 desc_cb = &ring->desc_cb[ring->next_to_clean];
13ac695e 585
586 prefetch(desc);
587
b5996f11 588 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
589
13ac695e 590 /* prefetch first cache line of first page */
591 prefetch(va);
592#if L1_CACHE_BYTES < 128
593 prefetch(va + L1_CACHE_BYTES);
594#endif
595
596 skb = *out_skb = napi_alloc_skb(&ring_data->napi,
597 HNS_RX_HEAD_SIZE);
b5996f11 598 if (unlikely(!skb)) {
599 netdev_err(ndev, "alloc rx skb fail\n");
600 ring->stats.sw_err_cnt++;
601 return -ENOMEM;
602 }
8379f0a8 603 skb_reset_mac_header(skb);
b5996f11 604
9cbe9fd5 605 prefetchw(skb->data);
13ac695e 606 length = le16_to_cpu(desc->rx.pkt_len);
607 bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
608 priv->ops.get_rxd_bnum(bnum_flag, &bnum);
609 *out_bnum = bnum;
610
b5996f11 611 if (length <= HNS_RX_HEAD_SIZE) {
612 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
613
614 /* we can reuse buffer as-is, just make sure it is local */
615 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
616 desc_cb->reuse_flag = 1;
617 else /* this page cannot be reused so discard it */
618 put_page(desc_cb->priv);
619
620 ring_ptr_move_fw(ring, next_to_clean);
621
 622 if (unlikely(bnum != 1)) { /* check err */
623 *out_bnum = 1;
624 goto out_bnum_err;
625 }
626 } else {
627 ring->stats.seg_pkt_cnt++;
628
629 pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
630 memcpy(__skb_put(skb, pull_len), va,
631 ALIGN(pull_len, sizeof(long)));
632
9cbe9fd5 633 hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
b5996f11 634 ring_ptr_move_fw(ring, next_to_clean);
635
 636 if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
637 *out_bnum = 1;
638 goto out_bnum_err;
639 }
640 for (i = 1; i < bnum; i++) {
641 desc = &ring->desc[ring->next_to_clean];
642 desc_cb = &ring->desc_cb[ring->next_to_clean];
b5996f11 643
9cbe9fd5 644 hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
b5996f11 645 ring_ptr_move_fw(ring, next_to_clean);
646 }
647 }
648
 649 /* exception handling: free the skb and skip past its descs */
650 if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
651out_bnum_err:
 652 *out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
653 netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
654 bnum, ring->max_desc_num_per_pkt,
655 length, (int)MAX_SKB_FRAGS,
656 ((u64 *)desc)[0], ((u64 *)desc)[1]);
657 ring->stats.err_bd_num++;
658 dev_kfree_skb_any(skb);
659 return -EDOM;
660 }
661
662 bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
663
664 if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
665 netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
666 ((u64 *)desc)[0], ((u64 *)desc)[1]);
667 ring->stats.non_vld_descs++;
668 dev_kfree_skb_any(skb);
669 return -EINVAL;
670 }
671
672 if (unlikely((!desc->rx.pkt_len) ||
673 hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
b5996f11 674 ring->stats.err_pkt_len++;
675 dev_kfree_skb_any(skb);
676 return -EFAULT;
677 }
678
679 if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
b5996f11 680 ring->stats.l2_err++;
681 dev_kfree_skb_any(skb);
682 return -EFAULT;
683 }
684
0d6b425a 685 /* filter out multicast pkt with the same src mac as this port */
686 eh = eth_hdr(skb);
687 if (unlikely(is_multicast_ether_addr(eh->h_dest) &&
688 ether_addr_equal(ndev->dev_addr, eh->h_source))) {
689 dev_kfree_skb_any(skb);
690 return -EFAULT;
691 }
692
b5996f11 693 ring->stats.rx_pkts++;
694 ring->stats.rx_bytes += skb->len;
695
696 if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
697 hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
b5996f11 698 ring->stats.l3l4_csum_err++;
699 return 0;
700 }
701
702 skb->ip_summed = CHECKSUM_UNNECESSARY;
703
704 return 0;
705}
706
707static void
708hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
709{
710 int i, ret;
711 struct hnae_desc_cb res_cbs;
712 struct hnae_desc_cb *desc_cb;
713 struct hnae_ring *ring = ring_data->ring;
714 struct net_device *ndev = ring_data->napi.dev;
715
716 for (i = 0; i < cleand_count; i++) {
717 desc_cb = &ring->desc_cb[ring->next_to_use];
718 if (desc_cb->reuse_flag) {
719 ring->stats.reuse_pg_cnt++;
720 hnae_reuse_buffer(ring, ring->next_to_use);
721 } else {
722 ret = hnae_reserve_buffer_map(ring, &res_cbs);
723 if (ret) {
724 ring->stats.sw_err_cnt++;
725 netdev_err(ndev, "hnae reserve buffer map failed.\n");
726 break;
727 }
728 hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
729 }
730
731 ring_ptr_move_fw(ring, next_to_use);
732 }
733
 734 wmb(); /* make sure all data is written before submit */
735 writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
736}
737
 738/* hand the received skb up to the network stack */
740static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
741 struct sk_buff *skb)
742{
743 struct net_device *ndev = ring_data->napi.dev;
744
745 skb->protocol = eth_type_trans(skb, ndev);
746 (void)napi_gro_receive(&ring_data->napi, skb);
747 ndev->last_rx = jiffies;
748}
749
750static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
751 int budget, void *v)
752{
753 struct hnae_ring *ring = ring_data->ring;
754 struct sk_buff *skb;
755 int num, bnum, ex_num;
756#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
757 int recv_pkts, recv_bds, clean_count, err;
758
759 num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
760 rmb(); /* make sure num taken effect before the other data is touched */
761
762 recv_pkts = 0, recv_bds = 0, clean_count = 0;
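	/* Consume up to "budget" packets; if more BDs arrive while polling,
	 * jump back to recv and continue until the budget is exhausted.
	 */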
763recv:
764 while (recv_pkts < budget && recv_bds < num) {
 765 /* reuse or realloc buffers */
766 if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
767 hns_nic_alloc_rx_buffers(ring_data, clean_count);
768 clean_count = 0;
769 }
770
 771 /* poll one packet */
772 err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
773 if (unlikely(!skb)) /* this fault cannot be repaired */
774 break;
775
776 recv_bds += bnum;
777 clean_count += bnum;
 778 if (unlikely(err)) { /* skip the errored packet */
779 recv_pkts++;
780 continue;
781 }
782
 783 /* hand the packet up to the IP stack */
784 ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
785 ring_data, skb);
786 recv_pkts++;
787 }
788
 789 /* re-check for BDs that arrived while we were polling */
b5996f11 790 if (recv_pkts < budget) {
791 ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
9cbe9fd5 792
13ac695e 793 if (ex_num > clean_count) {
794 num += ex_num - clean_count;
9cbe9fd5 795 rmb(); /*complete read rx ring bd number*/
b5996f11 796 goto recv;
797 }
798 }
799
13ac695e 800 /* refill the rx ring with any buffers still outstanding */
801 if (clean_count > 0)
802 hns_nic_alloc_rx_buffers(ring_data, clean_count);
803
b5996f11 804 return recv_pkts;
805}
806
807static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
808{
809 struct hnae_ring *ring = ring_data->ring;
810 int num = 0;
811
 812 /* re-check for residual BDs (hardware bug workaround) */
813 num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
814
815 if (num > 0) {
816 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
817 ring_data->ring, 1);
818
819 napi_schedule(&ring_data->napi);
820 }
821}
822
823static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
824 int *bytes, int *pkts)
825{
826 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
827
828 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
829 (*bytes) += desc_cb->length;
 830 /* desc_cb will be cleaned after hnae_free_buffer_detach */
831 hnae_free_buffer_detach(ring, ring->next_to_clean);
832
833 ring_ptr_move_fw(ring, next_to_clean);
834}
835
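/* head is valid only if it lies in the half-open interval from
 * next_to_clean (exclusive) to next_to_use (inclusive), with ring
 * wraparound taken into account.
 */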
836static int is_valid_clean_head(struct hnae_ring *ring, int h)
837{
838 int u = ring->next_to_use;
839 int c = ring->next_to_clean;
840
841 if (unlikely(h > ring->desc_num))
842 return 0;
843
844 assert(u > 0 && u < ring->desc_num);
845 assert(c > 0 && c < ring->desc_num);
 846 assert(u != c && h != c); /* must be checked before calling this func */
847
848 return u > c ? (h > c && h <= u) : (h > c || h <= u);
849}
850
 851/* netif_tx_lock hurts performance; take it only when necessary */
852#ifdef CONFIG_NET_POLL_CONTROLLER
853#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
854#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
855#else
856#define NETIF_TX_LOCK(ndev)
857#define NETIF_TX_UNLOCK(ndev)
858#endif
859/* reclaim all desc in one budget
860 * return error or number of desc left
861 */
862static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
863 int budget, void *v)
864{
865 struct hnae_ring *ring = ring_data->ring;
866 struct net_device *ndev = ring_data->napi.dev;
867 struct netdev_queue *dev_queue;
868 struct hns_nic_priv *priv = netdev_priv(ndev);
869 int head;
870 int bytes, pkts;
871
872 NETIF_TX_LOCK(ndev);
873
874 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
875 rmb(); /* make sure head is ready before touch any data */
876
877 if (is_ring_empty(ring) || head == ring->next_to_clean) {
878 NETIF_TX_UNLOCK(ndev);
879 return 0; /* no data to poll */
880 }
881
882 if (!is_valid_clean_head(ring, head)) {
883 netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
884 ring->next_to_use, ring->next_to_clean);
885 ring->stats.io_err_cnt++;
886 NETIF_TX_UNLOCK(ndev);
887 return -EIO;
888 }
889
890 bytes = 0;
891 pkts = 0;
9cbe9fd5 892 while (head != ring->next_to_clean) {
b5996f11 893 hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
9cbe9fd5 894 /* issue prefetch for next Tx descriptor */
895 prefetch(&ring->desc_cb[ring->next_to_clean]);
896 }
b5996f11 897
898 NETIF_TX_UNLOCK(ndev);
899
900 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
901 netdev_tx_completed_queue(dev_queue, pkts, bytes);
902
13ac695e 903 if (unlikely(priv->link && !netif_carrier_ok(ndev)))
904 netif_carrier_on(ndev);
905
b5996f11 906 if (unlikely(pkts && netif_carrier_ok(ndev) &&
907 (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
908 /* Make sure that anybody stopping the queue after this
909 * sees the new next_to_clean.
910 */
911 smp_mb();
912 if (netif_tx_queue_stopped(dev_queue) &&
913 !test_bit(NIC_STATE_DOWN, &priv->state)) {
914 netif_tx_wake_queue(dev_queue);
915 ring->stats.restart_queue++;
916 }
917 }
918 return 0;
919}
920
921static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
922{
923 struct hnae_ring *ring = ring_data->ring;
1c3bae6e 924 int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
b5996f11 925
926 if (head != ring->next_to_clean) {
927 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
928 ring_data->ring, 1);
929
930 napi_schedule(&ring_data->napi);
931 }
932}
933
934static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
935{
936 struct hnae_ring *ring = ring_data->ring;
937 struct net_device *ndev = ring_data->napi.dev;
938 struct netdev_queue *dev_queue;
939 int head;
940 int bytes, pkts;
941
942 NETIF_TX_LOCK(ndev);
943
 944 head = ring->next_to_use; /* ntu: ring position set by software */
945 bytes = 0;
946 pkts = 0;
947 while (head != ring->next_to_clean)
948 hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
949
950 NETIF_TX_UNLOCK(ndev);
951
952 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
953 netdev_tx_reset_queue(dev_queue);
954}
955
956static int hns_nic_common_poll(struct napi_struct *napi, int budget)
957{
958 struct hns_nic_ring_data *ring_data =
959 container_of(napi, struct hns_nic_ring_data, napi);
960 int clean_complete = ring_data->poll_one(
961 ring_data, budget, ring_data->ex_process);
962
963 if (clean_complete >= 0 && clean_complete < budget) {
964 napi_complete(napi);
965 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
966 ring_data->ring, 0);
4b34aa41 967 if (ring_data->fini_process)
968 ring_data->fini_process(ring_data);
9cbe9fd5 969 return 0;
b5996f11 970 }
971
972 return clean_complete;
973}
974
975static irqreturn_t hns_irq_handle(int irq, void *dev)
976{
977 struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;
978
979 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
980 ring_data->ring, 1);
981 napi_schedule(&ring_data->napi);
982
983 return IRQ_HANDLED;
984}
985
986/**
 987 *hns_nic_adjust_link - adjust link state by the phy status or new param
988 *@ndev: net device
989 */
990static void hns_nic_adjust_link(struct net_device *ndev)
991{
992 struct hns_nic_priv *priv = netdev_priv(ndev);
993 struct hnae_handle *h = priv->ae_handle;
bb7189dc 994 int state = 1;
995
996 if (priv->phy) {
997 h->dev->ops->adjust_link(h, ndev->phydev->speed,
998 ndev->phydev->duplex);
999 state = priv->phy->link;
1000 }
1001 state = state && h->dev->ops->get_status(h);
b5996f11 1002
bb7189dc 1003 if (state != priv->link) {
1004 if (state) {
1005 netif_carrier_on(ndev);
1006 netif_tx_wake_all_queues(ndev);
1007 netdev_info(ndev, "link up\n");
1008 } else {
1009 netif_carrier_off(ndev);
1010 netdev_info(ndev, "link down\n");
1011 }
1012 priv->link = state;
1013 }
b5996f11 1014}
1015
1016/**
1017 *hns_nic_init_phy - init phy
1018 *@ndev: net device
1019 *@h: ae handle
1020 * Return 0 on success, negative on failure
1021 */
1022int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
1023{
1024 struct hns_nic_priv *priv = netdev_priv(ndev);
652d39b0 1025 struct phy_device *phy_dev = h->phy_dev;
1026 int ret;
b5996f11 1027
652d39b0 1028 if (!h->phy_dev)
b5996f11 1029 return 0;
1030
652d39b0 1031 if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
1032 phy_dev->dev_flags = 0;
b5996f11 1033
652d39b0 1034 ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
1035 h->phy_if);
1036 } else {
1037 ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
1038 }
1039 if (unlikely(ret))
1040 return -ENODEV;
b5996f11 1041
1042 phy_dev->supported &= h->if_support;
1043 phy_dev->advertising = phy_dev->supported;
1044
1045 if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
1046 phy_dev->autoneg = false;
1047
1048 priv->phy = phy_dev;
1049
1050 return 0;
1051}
1052
1053static int hns_nic_ring_open(struct net_device *netdev, int idx)
1054{
1055 struct hns_nic_priv *priv = netdev_priv(netdev);
1056 struct hnae_handle *h = priv->ae_handle;
1057
1058 napi_enable(&priv->ring_data[idx].napi);
1059
1060 enable_irq(priv->ring_data[idx].ring->irq);
1061 h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
1062
1063 return 0;
1064}
1065
1066static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
1067{
1068 struct hns_nic_priv *priv = netdev_priv(ndev);
1069 struct hnae_handle *h = priv->ae_handle;
1070 struct sockaddr *mac_addr = p;
1071 int ret;
1072
1073 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1074 return -EADDRNOTAVAIL;
1075
1076 ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
1077 if (ret) {
1078 netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
1079 return ret;
1080 }
1081
1082 memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
1083
1084 return 0;
1085}
1086
1087void hns_nic_update_stats(struct net_device *netdev)
1088{
1089 struct hns_nic_priv *priv = netdev_priv(netdev);
1090 struct hnae_handle *h = priv->ae_handle;
1091
1092 h->dev->ops->update_stats(h, &netdev->stats);
1093}
1094
 1095/* set mac addr if it is configured, or leave it to the AE driver */
1096static void hns_init_mac_addr(struct net_device *ndev)
1097{
1098 struct hns_nic_priv *priv = netdev_priv(ndev);
b5996f11 1099
6162928c 1100 if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
b5996f11 1101 eth_hw_addr_random(ndev);
1102 dev_warn(priv->dev, "No valid mac, use random mac %pM",
1103 ndev->dev_addr);
1104 }
1105}
1106
1107static void hns_nic_ring_close(struct net_device *netdev, int idx)
1108{
1109 struct hns_nic_priv *priv = netdev_priv(netdev);
1110 struct hnae_handle *h = priv->ae_handle;
1111
1112 h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
1113 disable_irq(priv->ring_data[idx].ring->irq);
1114
1115 napi_disable(&priv->ring_data[idx].napi);
1116}
1117
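/* Spread ring interrupts over the CPUs: with one queue pair per CPU, bind
 * each ring's irq to the CPU matching its queue index; otherwise pin tx
 * rings to the even CPUs (2 * index) and rx rings to the odd CPUs right
 * after them.
 */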
13ac695e 1118static void hns_set_irq_affinity(struct hns_nic_priv *priv)
b5996f11 1119{
1120 struct hnae_handle *h = priv->ae_handle;
1121 struct hns_nic_ring_data *rd;
1122 int i;
b5996f11 1123 int cpu;
1124 cpumask_t mask;
1125
13ac695e 1126 /* different irq balancing for 16-core and 32-core systems */
1127 if (h->q_num == num_possible_cpus()) {
1128 for (i = 0; i < h->q_num * 2; i++) {
1129 rd = &priv->ring_data[i];
1130 if (cpu_online(rd->queue_index)) {
1131 cpumask_clear(&mask);
1132 cpu = rd->queue_index;
1133 cpumask_set_cpu(cpu, &mask);
1134 (void)irq_set_affinity_hint(rd->ring->irq,
1135 &mask);
1136 }
1137 }
1138 } else {
1139 for (i = 0; i < h->q_num; i++) {
1140 rd = &priv->ring_data[i];
1141 if (cpu_online(rd->queue_index * 2)) {
1142 cpumask_clear(&mask);
1143 cpu = rd->queue_index * 2;
1144 cpumask_set_cpu(cpu, &mask);
1145 (void)irq_set_affinity_hint(rd->ring->irq,
1146 &mask);
1147 }
1148 }
1149
1150 for (i = h->q_num; i < h->q_num * 2; i++) {
1151 rd = &priv->ring_data[i];
1152 if (cpu_online(rd->queue_index * 2 + 1)) {
1153 cpumask_clear(&mask);
1154 cpu = rd->queue_index * 2 + 1;
1155 cpumask_set_cpu(cpu, &mask);
1156 (void)irq_set_affinity_hint(rd->ring->irq,
1157 &mask);
1158 }
1159 }
1160 }
1161}
1162
1163static int hns_nic_init_irq(struct hns_nic_priv *priv)
1164{
1165 struct hnae_handle *h = priv->ae_handle;
1166 struct hns_nic_ring_data *rd;
1167 int i;
1168 int ret;
1169
b5996f11 1170 for (i = 0; i < h->q_num * 2; i++) {
1171 rd = &priv->ring_data[i];
1172
1173 if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
1174 break;
1175
1176 snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
1177 "%s-%s%d", priv->netdev->name,
1178 (i < h->q_num ? "tx" : "rx"), rd->queue_index);
1179
1180 rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
1181
1182 ret = request_irq(rd->ring->irq,
1183 hns_irq_handle, 0, rd->ring->ring_name, rd);
1184 if (ret) {
1185 netdev_err(priv->netdev, "request irq(%d) fail\n",
1186 rd->ring->irq);
1187 return ret;
1188 }
1189 disable_irq(rd->ring->irq);
1190 rd->ring->irq_init_flag = RCB_IRQ_INITED;
b5996f11 1191 }
1192
13ac695e 1193 /* set cpu affinity */
1194 hns_set_irq_affinity(priv);
1195
b5996f11 1196 return 0;
1197}
1198
1199static int hns_nic_net_up(struct net_device *ndev)
1200{
1201 struct hns_nic_priv *priv = netdev_priv(ndev);
1202 struct hnae_handle *h = priv->ae_handle;
454784d8 1203 int i, j;
b5996f11 1204 int ret;
1205
1206 ret = hns_nic_init_irq(priv);
1207 if (ret != 0) {
1208 netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
1209 return ret;
1210 }
1211
1212 for (i = 0; i < h->q_num * 2; i++) {
1213 ret = hns_nic_ring_open(ndev, i);
1214 if (ret)
1215 goto out_has_some_queues;
1216 }
1217
b5996f11 1218 ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
1219 if (ret)
1220 goto out_set_mac_addr_err;
1221
1222 ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
1223 if (ret)
1224 goto out_start_err;
1225
1226 if (priv->phy)
1227 phy_start(priv->phy);
1228
1229 clear_bit(NIC_STATE_DOWN, &priv->state);
1230 (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
1231
1232 return 0;
1233
1234out_start_err:
1235 netif_stop_queue(ndev);
1236out_set_mac_addr_err:
b5996f11 1237out_has_some_queues:
1238 for (j = i - 1; j >= 0; j--)
1239 hns_nic_ring_close(ndev, j);
1240
1241 set_bit(NIC_STATE_DOWN, &priv->state);
1242
1243 return ret;
1244}
1245
1246static void hns_nic_net_down(struct net_device *ndev)
1247{
1248 int i;
1249 struct hnae_ae_ops *ops;
1250 struct hns_nic_priv *priv = netdev_priv(ndev);
1251
1252 if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
1253 return;
1254
1255 (void)del_timer_sync(&priv->service_timer);
1256 netif_tx_stop_all_queues(ndev);
1257 netif_carrier_off(ndev);
1258 netif_tx_disable(ndev);
1259 priv->link = 0;
1260
1261 if (priv->phy)
1262 phy_stop(priv->phy);
1263
1264 ops = priv->ae_handle->dev->ops;
1265
1266 if (ops->stop)
1267 ops->stop(priv->ae_handle);
1268
1269 netif_tx_stop_all_queues(ndev);
1270
1271 for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
1272 hns_nic_ring_close(ndev, i);
1273 hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
1274
1275 /* clean tx buffers*/
1276 hns_nic_tx_clr_all_bufs(priv->ring_data + i);
1277 }
1278}
1279
1280void hns_nic_net_reset(struct net_device *ndev)
1281{
1282 struct hns_nic_priv *priv = netdev_priv(ndev);
1283 struct hnae_handle *handle = priv->ae_handle;
1284
1285 while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
1286 usleep_range(1000, 2000);
1287
1288 (void)hnae_reinit_handle(handle);
1289
1290 clear_bit(NIC_STATE_RESETTING, &priv->state);
1291}
1292
1293void hns_nic_net_reinit(struct net_device *netdev)
1294{
1295 struct hns_nic_priv *priv = netdev_priv(netdev);
1296
860e9538 1297 netif_trans_update(priv->netdev);
b5996f11 1298 while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
1299 usleep_range(1000, 2000);
1300
1301 hns_nic_net_down(netdev);
1302 hns_nic_net_reset(netdev);
1303 (void)hns_nic_net_up(netdev);
1304 clear_bit(NIC_STATE_REINITING, &priv->state);
1305}
1306
1307static int hns_nic_net_open(struct net_device *ndev)
1308{
1309 struct hns_nic_priv *priv = netdev_priv(ndev);
1310 struct hnae_handle *h = priv->ae_handle;
1311 int ret;
1312
1313 if (test_bit(NIC_STATE_TESTING, &priv->state))
1314 return -EBUSY;
1315
1316 priv->link = 0;
1317 netif_carrier_off(ndev);
1318
1319 ret = netif_set_real_num_tx_queues(ndev, h->q_num);
1320 if (ret < 0) {
1321 netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
1322 ret);
1323 return ret;
1324 }
1325
1326 ret = netif_set_real_num_rx_queues(ndev, h->q_num);
1327 if (ret < 0) {
1328 netdev_err(ndev,
1329 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
1330 return ret;
1331 }
1332
1333 ret = hns_nic_net_up(ndev);
1334 if (ret) {
1335 netdev_err(ndev,
1336 "hns net up fail, ret=%d!\n", ret);
1337 return ret;
1338 }
1339
1340 return 0;
1341}
1342
1343static int hns_nic_net_stop(struct net_device *ndev)
1344{
1345 hns_nic_net_down(ndev);
1346
1347 return 0;
1348}
1349
1350static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
1351static void hns_nic_net_timeout(struct net_device *ndev)
1352{
1353 struct hns_nic_priv *priv = netdev_priv(ndev);
1354
1355 hns_tx_timeout_reset(priv);
1356}
1357
1358static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
1359 int cmd)
1360{
1361 struct hns_nic_priv *priv = netdev_priv(netdev);
1362 struct phy_device *phy_dev = priv->phy;
1363
1364 if (!netif_running(netdev))
1365 return -EINVAL;
1366
1367 if (!phy_dev)
1368 return -ENOTSUPP;
1369
1370 return phy_mii_ioctl(phy_dev, ifr, cmd);
1371}
1372
1373/* use only for netconsole to poll with the device without interrupt */
1374#ifdef CONFIG_NET_POLL_CONTROLLER
1375void hns_nic_poll_controller(struct net_device *ndev)
1376{
1377 struct hns_nic_priv *priv = netdev_priv(ndev);
1378 unsigned long flags;
1379 int i;
1380
1381 local_irq_save(flags);
1382 for (i = 0; i < priv->ae_handle->q_num * 2; i++)
1383 napi_schedule(&priv->ring_data[i].napi);
1384 local_irq_restore(flags);
1385}
1386#endif
1387
1388static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
1389 struct net_device *ndev)
1390{
1391 struct hns_nic_priv *priv = netdev_priv(ndev);
1392 int ret;
1393
 1394 assert(skb->queue_mapping < priv->ae_handle->q_num);
1395 ret = hns_nic_net_xmit_hw(ndev, skb,
1396 &tx_ring_data(priv, skb->queue_mapping));
1397 if (ret == NETDEV_TX_OK) {
860e9538 1398 netif_trans_update(ndev);
b5996f11 1399 ndev->stats.tx_bytes += skb->len;
1400 ndev->stats.tx_packets++;
1401 }
1402 return (netdev_tx_t)ret;
1403}
1404
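/* Changing the MTU of a running interface needs a full stop/start cycle
 * around the hardware set_mtu call; ndev->mtu is updated only on success.
 */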
1405static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
1406{
1407 struct hns_nic_priv *priv = netdev_priv(ndev);
1408 struct hnae_handle *h = priv->ae_handle;
1409 int ret;
1410
1411 /* MTU < 68 is an error and causes problems on some kernels */
1412 if (new_mtu < 68)
1413 return -EINVAL;
1414
1415 if (!h->dev->ops->set_mtu)
1416 return -ENOTSUPP;
1417
1418 if (netif_running(ndev)) {
1419 (void)hns_nic_net_stop(ndev);
1420 msleep(100);
1421
1422 ret = h->dev->ops->set_mtu(h, new_mtu);
1423 if (ret)
1424 netdev_err(ndev, "set mtu fail, return value %d\n",
1425 ret);
1426
1427 if (hns_nic_net_open(ndev))
1428 netdev_err(ndev, "hns net open fail\n");
1429 } else {
1430 ret = h->dev->ops->set_mtu(h, new_mtu);
1431 }
1432
1433 if (!ret)
1434 ndev->mtu = new_mtu;
1435
1436 return ret;
1437}
1438
38f616da 1439static int hns_nic_set_features(struct net_device *netdev,
1440 netdev_features_t features)
1441{
1442 struct hns_nic_priv *priv = netdev_priv(netdev);
1443 struct hnae_handle *h = priv->ae_handle;
1444
1445 switch (priv->enet_ver) {
1446 case AE_VERSION_1:
1447 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
 1448 netdev_info(netdev, "enet v1 does not support tso!\n");
1449 break;
1450 default:
1451 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1452 priv->ops.fill_desc = fill_tso_desc;
1453 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
 1454 /* The chip only supports 7*4096 */
1455 netif_set_gso_max_size(netdev, 7 * 4096);
1456 h->dev->ops->set_tso_stats(h, 1);
1457 } else {
1458 priv->ops.fill_desc = fill_v2_desc;
1459 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
1460 h->dev->ops->set_tso_stats(h, 0);
1461 }
1462 break;
1463 }
1464 netdev->features = features;
1465 return 0;
1466}
1467
1468static netdev_features_t hns_nic_fix_features(
1469 struct net_device *netdev, netdev_features_t features)
1470{
1471 struct hns_nic_priv *priv = netdev_priv(netdev);
1472
1473 switch (priv->enet_ver) {
1474 case AE_VERSION_1:
1475 features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
1476 NETIF_F_HW_VLAN_CTAG_FILTER);
1477 break;
1478 default:
1479 break;
1480 }
1481 return features;
1482}
1483
b5996f11 1484/**
 1485 * hns_set_multicast_list - set multicast mac addresses
 1486 * @ndev: net device
1488 *
1489 * return void
1490 */
1491void hns_set_multicast_list(struct net_device *ndev)
1492{
1493 struct hns_nic_priv *priv = netdev_priv(ndev);
1494 struct hnae_handle *h = priv->ae_handle;
1495 struct netdev_hw_addr *ha = NULL;
1496
1497 if (!h) {
1498 netdev_err(ndev, "hnae handle is null\n");
1499 return;
1500 }
1501
1502 if (h->dev->ops->set_mc_addr) {
1503 netdev_for_each_mc_addr(ha, ndev)
1504 if (h->dev->ops->set_mc_addr(h, ha->addr))
1505 netdev_err(ndev, "set multicast fail\n");
1506 }
1507}
1508
4568637f 1509void hns_nic_set_rx_mode(struct net_device *ndev)
1510{
1511 struct hns_nic_priv *priv = netdev_priv(ndev);
1512 struct hnae_handle *h = priv->ae_handle;
1513
1514 if (h->dev->ops->set_promisc_mode) {
1515 if (ndev->flags & IFF_PROMISC)
1516 h->dev->ops->set_promisc_mode(h, 1);
1517 else
1518 h->dev->ops->set_promisc_mode(h, 0);
1519 }
1520
1521 hns_set_multicast_list(ndev);
1522}
1523
b5996f11 1524struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
1525 struct rtnl_link_stats64 *stats)
1526{
1527 int idx = 0;
1528 u64 tx_bytes = 0;
1529 u64 rx_bytes = 0;
1530 u64 tx_pkts = 0;
1531 u64 rx_pkts = 0;
1532 struct hns_nic_priv *priv = netdev_priv(ndev);
1533 struct hnae_handle *h = priv->ae_handle;
1534
1535 for (idx = 0; idx < h->q_num; idx++) {
1536 tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
1537 tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
1538 rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
1539 rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
1540 }
1541
1542 stats->tx_bytes = tx_bytes;
1543 stats->tx_packets = tx_pkts;
1544 stats->rx_bytes = rx_bytes;
1545 stats->rx_packets = rx_pkts;
1546
1547 stats->rx_errors = ndev->stats.rx_errors;
1548 stats->multicast = ndev->stats.multicast;
1549 stats->rx_length_errors = ndev->stats.rx_length_errors;
1550 stats->rx_crc_errors = ndev->stats.rx_crc_errors;
1551 stats->rx_missed_errors = ndev->stats.rx_missed_errors;
1552
1553 stats->tx_errors = ndev->stats.tx_errors;
1554 stats->rx_dropped = ndev->stats.rx_dropped;
1555 stats->tx_dropped = ndev->stats.tx_dropped;
1556 stats->collisions = ndev->stats.collisions;
1557 stats->rx_over_errors = ndev->stats.rx_over_errors;
1558 stats->rx_frame_errors = ndev->stats.rx_frame_errors;
1559 stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
1560 stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
1561 stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
1562 stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
1563 stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
1564 stats->tx_window_errors = ndev->stats.tx_window_errors;
1565 stats->rx_compressed = ndev->stats.rx_compressed;
1566 stats->tx_compressed = ndev->stats.tx_compressed;
1567
1568 return stats;
1569}
1570
1571static const struct net_device_ops hns_nic_netdev_ops = {
1572 .ndo_open = hns_nic_net_open,
1573 .ndo_stop = hns_nic_net_stop,
1574 .ndo_start_xmit = hns_nic_net_xmit,
1575 .ndo_tx_timeout = hns_nic_net_timeout,
1576 .ndo_set_mac_address = hns_nic_net_set_mac_address,
1577 .ndo_change_mtu = hns_nic_change_mtu,
1578 .ndo_do_ioctl = hns_nic_do_ioctl,
38f616da 1579 .ndo_set_features = hns_nic_set_features,
1580 .ndo_fix_features = hns_nic_fix_features,
b5996f11 1581 .ndo_get_stats64 = hns_nic_get_stats64,
1582#ifdef CONFIG_NET_POLL_CONTROLLER
1583 .ndo_poll_controller = hns_nic_poll_controller,
1584#endif
4568637f 1585 .ndo_set_rx_mode = hns_nic_set_rx_mode,
b5996f11 1586};
1587
1588static void hns_nic_update_link_status(struct net_device *netdev)
1589{
1590 struct hns_nic_priv *priv = netdev_priv(netdev);
1591
1592 struct hnae_handle *h = priv->ae_handle;
b5996f11 1593
bb7189dc 1594 if (h->phy_dev) {
1595 if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
1596 return;
b5996f11 1597
bb7189dc 1598 (void)genphy_read_status(h->phy_dev);
b5996f11 1599 }
bb7189dc 1600 hns_nic_adjust_link(netdev);
b5996f11 1601}
1602
 1603/* for dumping key regs */
1604static void hns_nic_dump(struct hns_nic_priv *priv)
1605{
1606 struct hnae_handle *h = priv->ae_handle;
1607 struct hnae_ae_ops *ops = h->dev->ops;
1608 u32 *data, reg_num, i;
1609
1610 if (ops->get_regs_len && ops->get_regs) {
1611 reg_num = ops->get_regs_len(priv->ae_handle);
1612 reg_num = (reg_num + 3ul) & ~3ul;
1613 data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
1614 if (data) {
1615 ops->get_regs(priv->ae_handle, data);
1616 for (i = 0; i < reg_num; i += 4)
1617 pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1618 i, data[i], data[i + 1],
1619 data[i + 2], data[i + 3]);
1620 kfree(data);
1621 }
1622 }
1623
1624 for (i = 0; i < h->q_num; i++) {
1625 pr_info("tx_queue%d_next_to_clean:%d\n",
1626 i, h->qs[i]->tx_ring.next_to_clean);
1627 pr_info("tx_queue%d_next_to_use:%d\n",
1628 i, h->qs[i]->tx_ring.next_to_use);
1629 pr_info("rx_queue%d_next_to_clean:%d\n",
1630 i, h->qs[i]->rx_ring.next_to_clean);
1631 pr_info("rx_queue%d_next_to_use:%d\n",
1632 i, h->qs[i]->rx_ring.next_to_use);
1633 }
1634}
1635
f7211729 1636/* for resetting subtask */
b5996f11 1637static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
1638{
1639 enum hnae_port_type type = priv->ae_handle->port_type;
1640
1641 if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
1642 return;
1643 clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
1644
1645 /* If we're already down, removing or resetting, just bail */
1646 if (test_bit(NIC_STATE_DOWN, &priv->state) ||
1647 test_bit(NIC_STATE_REMOVING, &priv->state) ||
1648 test_bit(NIC_STATE_RESETTING, &priv->state))
1649 return;
1650
1651 hns_nic_dump(priv);
13ac695e 1652 netdev_info(priv->netdev, "try to reset %s port!\n",
1653 (type == HNAE_PORT_DEBUG ? "debug" : "service"));
b5996f11 1654
1655 rtnl_lock();
90a505b9 1656 /* put off any impending NetWatchDogTimeout */
860e9538 1657 netif_trans_update(priv->netdev);
90a505b9 1658
13ac695e 1659 if (type == HNAE_PORT_DEBUG) {
b5996f11 1660 hns_nic_net_reinit(priv->netdev);
13ac695e 1661 } else {
1662 netif_carrier_off(priv->netdev);
1663 netif_tx_disable(priv->netdev);
1664 }
b5996f11 1665 rtnl_unlock();
1666}
1667
 1668/* signal that the service task has completed */
1669static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
1670{
13ac695e 1671 WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
b5996f11 1672
1673 smp_mb__before_atomic();
1674 clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
1675}
1676
1677static void hns_nic_service_task(struct work_struct *work)
1678{
1679 struct hns_nic_priv *priv
1680 = container_of(work, struct hns_nic_priv, service_task);
1681 struct hnae_handle *h = priv->ae_handle;
1682
1683 hns_nic_update_link_status(priv->netdev);
1684 h->dev->ops->update_led_status(h);
1685 hns_nic_update_stats(priv->netdev);
1686
1687 hns_nic_reset_subtask(priv);
1688 hns_nic_service_event_complete(priv);
1689}
1690
1691static void hns_nic_task_schedule(struct hns_nic_priv *priv)
1692{
1693 if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
1694 !test_bit(NIC_STATE_REMOVING, &priv->state) &&
1695 !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
1696 (void)schedule_work(&priv->service_task);
1697}
1698
1699static void hns_nic_service_timer(unsigned long data)
1700{
1701 struct hns_nic_priv *priv = (struct hns_nic_priv *)data;
1702
1703 (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
1704
1705 hns_nic_task_schedule(priv);
1706}
1707
1708/**
1709 * hns_tx_timeout_reset - initiate reset due to Tx timeout
1710 * @priv: driver private struct
1711 **/
1712static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
1713{
1714 /* Do the reset outside of interrupt context */
1715 if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
1716 set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
1717 netdev_warn(priv->netdev,
1718 "initiating reset due to tx timeout(%llu,0x%lx)\n",
1719 priv->tx_timeout_count, priv->state);
1720 priv->tx_timeout_count++;
1721 hns_nic_task_schedule(priv);
1722 }
1723}
1724
1725static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1726{
1727 struct hnae_handle *h = priv->ae_handle;
1728 struct hns_nic_ring_data *rd;
4b34aa41 1729 bool is_ver1 = AE_IS_VER1(priv->enet_ver);
b5996f11 1730 int i;
1731
1732 if (h->q_num > NIC_MAX_Q_PER_VF) {
 1733 netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
1734 return -EINVAL;
1735 }
1736
1737 priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
1738 GFP_KERNEL);
1739 if (!priv->ring_data)
1740 return -ENOMEM;
1741
1742 for (i = 0; i < h->q_num; i++) {
1743 rd = &priv->ring_data[i];
1744 rd->queue_index = i;
1745 rd->ring = &h->qs[i]->tx_ring;
1746 rd->poll_one = hns_nic_tx_poll_one;
4b34aa41 1747 rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
b5996f11 1748
1749 netif_napi_add(priv->netdev, &rd->napi,
1750 hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
1751 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
1752 }
1753 for (i = h->q_num; i < h->q_num * 2; i++) {
1754 rd = &priv->ring_data[i];
1755 rd->queue_index = i - h->q_num;
1756 rd->ring = &h->qs[i - h->q_num]->rx_ring;
1757 rd->poll_one = hns_nic_rx_poll_one;
1758 rd->ex_process = hns_nic_rx_up_pro;
4b34aa41 1759 rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
b5996f11 1760
1761 netif_napi_add(priv->netdev, &rd->napi,
1762 hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
1763 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
1764 }
1765
1766 return 0;
1767}
1768
1769static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
1770{
1771 struct hnae_handle *h = priv->ae_handle;
1772 int i;
1773
1774 for (i = 0; i < h->q_num * 2; i++) {
1775 netif_napi_del(&priv->ring_data[i].napi);
1776 if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
13ac695e 1777 (void)irq_set_affinity_hint(
1778 priv->ring_data[i].ring->irq,
1779 NULL);
b5996f11 1780 free_irq(priv->ring_data[i].ring->irq,
1781 &priv->ring_data[i]);
1782 }
1783
1784 priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
1785 }
1786 kfree(priv->ring_data);
1787}
1788
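/* Select the per-chip descriptor-fill and queue-stop ops: V1 uses the
 * legacy fill_desc, while V2 switches to the TSO variants whenever TSO is
 * enabled in the netdev features.
 */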
13ac695e 1789static void hns_nic_set_priv_ops(struct net_device *netdev)
1790{
1791 struct hns_nic_priv *priv = netdev_priv(netdev);
64353af6 1792 struct hnae_handle *h = priv->ae_handle;
13ac695e 1793
1794 if (AE_IS_VER1(priv->enet_ver)) {
1795 priv->ops.fill_desc = fill_desc;
1796 priv->ops.get_rxd_bnum = get_rx_desc_bnum;
1797 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
1798 } else {
1799 priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
64353af6 1800 if ((netdev->features & NETIF_F_TSO) ||
1801 (netdev->features & NETIF_F_TSO6)) {
1802 priv->ops.fill_desc = fill_tso_desc;
1803 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
 1804 /* This chip only supports 7*4096 */
1805 netif_set_gso_max_size(netdev, 7 * 4096);
1806 h->dev->ops->set_tso_stats(h, 1);
1807 } else {
1808 priv->ops.fill_desc = fill_v2_desc;
1809 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
1810 }
13ac695e 1811 }
1812}
1813
b5996f11 1814static int hns_nic_try_get_ae(struct net_device *ndev)
1815{
1816 struct hns_nic_priv *priv = netdev_priv(ndev);
1817 struct hnae_handle *h;
1818 int ret;
1819
1820 h = hnae_get_handle(&priv->netdev->dev,
7b2acae6 1821 priv->fwnode, priv->port_id, NULL);
b5996f11 1822 if (IS_ERR_OR_NULL(h)) {
daa8cfd9 1823 ret = -ENODEV;
b5996f11 1824 dev_dbg(priv->dev, "no handle yet, register notifier!\n");
1825 goto out;
1826 }
1827 priv->ae_handle = h;
1828
1829 ret = hns_nic_init_phy(ndev, h);
1830 if (ret) {
1831 dev_err(priv->dev, "probe phy device fail!\n");
1832 goto out_init_phy;
1833 }
1834
1835 ret = hns_nic_init_ring_data(priv);
1836 if (ret) {
1837 ret = -ENOMEM;
1838 goto out_init_ring_data;
1839 }
1840
13ac695e 1841 hns_nic_set_priv_ops(ndev);
1842
b5996f11 1843 ret = register_netdev(ndev);
1844 if (ret) {
1845 dev_err(priv->dev, "probe register netdev fail!\n");
1846 goto out_reg_ndev_fail;
1847 }
1848 return 0;
1849
1850out_reg_ndev_fail:
1851 hns_nic_uninit_ring_data(priv);
1852 priv->ring_data = NULL;
1853out_init_phy:
1854out_init_ring_data:
1855 hnae_put_handle(priv->ae_handle);
1856 priv->ae_handle = NULL;
1857out:
1858 return ret;
1859}
1860
1861static int hns_nic_notifier_action(struct notifier_block *nb,
1862 unsigned long action, void *data)
1863{
1864 struct hns_nic_priv *priv =
1865 container_of(nb, struct hns_nic_priv, notifier_block);
1866
1867 assert(action == HNAE_AE_REGISTER);
1868
1869 if (!hns_nic_try_get_ae(priv->netdev)) {
1870 hnae_unregister_notifier(&priv->notifier_block);
1871 priv->notifier_block.notifier_call = NULL;
1872 }
1873 return 0;
1874}
1875
1876static int hns_nic_dev_probe(struct platform_device *pdev)
1877{
1878 struct device *dev = &pdev->dev;
1879 struct net_device *ndev;
1880 struct hns_nic_priv *priv;
406adee9 1881 u32 port_id;
b5996f11 1882 int ret;
1883
1884 ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
1885 if (!ndev)
1886 return -ENOMEM;
1887
1888 platform_set_drvdata(pdev, ndev);
1889
1890 priv = netdev_priv(ndev);
1891 priv->dev = dev;
1892 priv->netdev = ndev;
1893
63434888 1894 if (dev_of_node(dev)) {
1895 struct device_node *ae_node;
b5996f11 1896
63434888 1897 if (of_device_is_compatible(dev->of_node,
1898 "hisilicon,hns-nic-v1"))
1899 priv->enet_ver = AE_VERSION_1;
1900 else
1901 priv->enet_ver = AE_VERSION_2;
1902
1903 ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
1904 if (IS_ERR_OR_NULL(ae_node)) {
1905 ret = PTR_ERR(ae_node);
 1906 dev_err(dev, "cannot find ae-handle\n");
1907 goto out_read_prop_fail;
1908 }
1909 priv->fwnode = &ae_node->fwnode;
1910 } else if (is_acpi_node(dev->fwnode)) {
1911 struct acpi_reference_args args;
1912
1913 if (acpi_dev_found(hns_enet_acpi_match[0].id))
1914 priv->enet_ver = AE_VERSION_1;
1915 else if (acpi_dev_found(hns_enet_acpi_match[1].id))
1916 priv->enet_ver = AE_VERSION_2;
1917 else
1918 return -ENXIO;
1919
1920 /* try to find port-idx-in-ae first */
1921 ret = acpi_node_get_property_reference(dev->fwnode,
1922 "ae-handle", 0, &args);
1923 if (ret) {
 1924 dev_err(dev, "cannot find ae-handle\n");
1925 goto out_read_prop_fail;
1926 }
1927 priv->fwnode = acpi_fwnode_handle(args.adev);
1928 } else {
1929 dev_err(dev, "cannot read cfg data from OF or acpi\n");
1930 return -ENXIO;
48189d6a 1931 }
7b2acae6 1932
6162928c 1933 ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
406adee9 1934 if (ret) {
 1935 /* only for compatibility with old code */
6162928c 1936 ret = device_property_read_u32(dev, "port-id", &port_id);
406adee9 1937 if (ret)
1938 goto out_read_prop_fail;
 1939 /* for old dts, we need to calculate the port offset */
1940 port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
1941 : port_id - HNS_SRV_OFFSET;
1942 }
1943 priv->port_id = port_id;
b5996f11 1944
1945 hns_init_mac_addr(ndev);
1946
1947 ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
1948 ndev->priv_flags |= IFF_UNICAST_FLT;
1949 ndev->netdev_ops = &hns_nic_netdev_ops;
1950 hns_ethtool_set_ops(ndev);
13ac695e 1951
b5996f11 1952 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1953 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1954 NETIF_F_GRO;
1955 ndev->vlan_features |=
1956 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
1957 ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
1958
13ac695e 1959 switch (priv->enet_ver) {
1960 case AE_VERSION_2:
64353af6 1961 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
13ac695e 1962 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1963 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
64353af6 1964 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
13ac695e 1965 break;
1966 default:
1967 break;
1968 }
1969
b5996f11 1970 SET_NETDEV_DEV(ndev, dev);
1971
1972 if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
1973 dev_dbg(dev, "set mask to 64bit\n");
1974 else
39c94417 1975 dev_err(dev, "set mask to 64bit fail!\n");
b5996f11 1976
1977 /* carrier off reporting is important to ethtool even BEFORE open */
1978 netif_carrier_off(ndev);
1979
1980 setup_timer(&priv->service_timer, hns_nic_service_timer,
1981 (unsigned long)priv);
1982 INIT_WORK(&priv->service_task, hns_nic_service_task);
1983
1984 set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
1985 clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
1986 set_bit(NIC_STATE_DOWN, &priv->state);
1987
1988 if (hns_nic_try_get_ae(priv->netdev)) {
1989 priv->notifier_block.notifier_call = hns_nic_notifier_action;
1990 ret = hnae_register_notifier(&priv->notifier_block);
1991 if (ret) {
1992 dev_err(dev, "register notifier fail!\n");
1993 goto out_notify_fail;
1994 }
 1995 dev_dbg(dev, "no handle yet, register notifier!\n");
1996 }
1997
1998 return 0;
1999
2000out_notify_fail:
2001 (void)cancel_work_sync(&priv->service_task);
48189d6a 2002out_read_prop_fail:
b5996f11 2003 free_netdev(ndev);
2004 return ret;
2005}
2006
2007static int hns_nic_dev_remove(struct platform_device *pdev)
2008{
2009 struct net_device *ndev = platform_get_drvdata(pdev);
2010 struct hns_nic_priv *priv = netdev_priv(ndev);
2011
2012 if (ndev->reg_state != NETREG_UNINITIALIZED)
2013 unregister_netdev(ndev);
2014
2015 if (priv->ring_data)
2016 hns_nic_uninit_ring_data(priv);
2017 priv->ring_data = NULL;
2018
2019 if (priv->phy)
2020 phy_disconnect(priv->phy);
2021 priv->phy = NULL;
2022
2023 if (!IS_ERR_OR_NULL(priv->ae_handle))
2024 hnae_put_handle(priv->ae_handle);
2025 priv->ae_handle = NULL;
2026 if (priv->notifier_block.notifier_call)
2027 hnae_unregister_notifier(&priv->notifier_block);
2028 priv->notifier_block.notifier_call = NULL;
2029
2030 set_bit(NIC_STATE_REMOVING, &priv->state);
2031 (void)cancel_work_sync(&priv->service_task);
2032
2033 free_netdev(ndev);
2034 return 0;
2035}
2036
2037static const struct of_device_id hns_enet_of_match[] = {
2038 {.compatible = "hisilicon,hns-nic-v1",},
2039 {.compatible = "hisilicon,hns-nic-v2",},
2040 {},
2041};
2042
2043MODULE_DEVICE_TABLE(of, hns_enet_of_match);
2044
2045static struct platform_driver hns_nic_dev_driver = {
2046 .driver = {
2047 .name = "hns-nic",
b5996f11 2048 .of_match_table = hns_enet_of_match,
63434888 2049 .acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
b5996f11 2050 },
2051 .probe = hns_nic_dev_probe,
2052 .remove = hns_nic_dev_remove,
2053};
2054
2055module_platform_driver(hns_nic_dev_driver);
2056
2057MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
2058MODULE_AUTHOR("Hisilicon, Inc.");
2059MODULE_LICENSE("GPL");
2060MODULE_ALIAS("platform:hns-nic");