ldmvsw: Split sunvnet driver into common code
drivers/net/ethernet/sun/sunvnet_common.c
/* sunvnet_common.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#define CREATE_TRACE_POINTS
#include <trace/events/sunvnet.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#endif

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet_common.h"

/* Heuristic for the number of times to exponentially backoff and
 * retry sending an LDC trigger when EAGAIN is encountered
 */
#define	VNET_MAX_RETRIES	10

static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
static void vnet_port_reset(struct vnet_port *port);

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}

static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	pr_err("Resetting connection\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

static int vnet_port_alloc_tx_ring(struct vnet_port *port);

int sunvnet_send_attr_common(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = port->vp->dev;
	struct vio_net_attr_info pkt;
	int framelen = ETH_FRAME_LEN;
	int i, err;

	err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
	if (err)
		return err;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	if (vio_version_before(vio, 1, 2))
		pkt.xfer_mode = VIO_DRING_MODE;
	else
		pkt.xfer_mode = VIO_NEW_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
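	/* Pack the 6-byte MAC into the low 48 bits of pkt.addr with
	 * byte 0 most significant; e.g. 00:14:4f:f8:00:01 becomes
	 * 0x0000144ff80001.
	 */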
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	if (vio_version_after(vio, 1, 3)) {
		if (port->rmtu) {
			port->rmtu = min(VNET_MAXPACKET, port->rmtu);
			pkt.mtu = port->rmtu;
		} else {
			port->rmtu = VNET_MAXPACKET;
			pkt.mtu = port->rmtu;
		}
		if (vio_version_after_eq(vio, 1, 6))
			pkt.options = VIO_TX_DRING;
	} else if (vio_version_before(vio, 1, 3)) {
		pkt.mtu = framelen;
	} else { /* v1.3 */
		pkt.mtu = framelen + VLAN_HLEN;
	}

	pkt.cflags = 0;
	if (vio_version_after_eq(vio, 1, 7) && port->tso) {
		pkt.cflags |= VNET_LSO_IPV4_CAPAB;
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		pkt.ipv4_lso_maxlen = port->tsolen;
	}

	pkt.plnk_updt = PHYSLINK_UPDATE_NONE;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       "cflags[0x%04x] lso_max[%u]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long)pkt.addr,
	       pkt.ack_freq, pkt.plnk_updt, pkt.options,
	       (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}
EXPORT_SYMBOL_GPL(sunvnet_send_attr_common);

static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	struct vnet_port *port = to_vnet_port(vio);
	u64 localmtu;
	u8 xfer_mode;

	viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.sid = vio_send_sid(vio);

	xfer_mode = pkt->xfer_mode;
	/* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
	if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
		xfer_mode = VIO_NEW_DRING_MODE;

	/* MTU negotiation:
	 *	< v1.3 - ETH_FRAME_LEN exactly
	 *	> v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
	 *		 pkt->mtu for ACK
	 *	= v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
	 */
	if (vio_version_before(vio, 1, 3)) {
		localmtu = ETH_FRAME_LEN;
	} else if (vio_version_after(vio, 1, 3)) {
		localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
		localmtu = min(pkt->mtu, localmtu);
		pkt->mtu = localmtu;
	} else { /* v1.3 */
		localmtu = ETH_FRAME_LEN + VLAN_HLEN;
	}
	port->rmtu = localmtu;

	/* LSO negotiation */
	if (vio_version_after_eq(vio, 1, 7))
		port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
	else
		port->tso = false;
	if (port->tso) {
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
		if (port->tsolen < VNET_MINTSO) {
			port->tso = false;
			port->tsolen = 0;
			pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		}
		pkt->ipv4_lso_maxlen = port->tsolen;
	} else {
		pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		pkt->ipv4_lso_maxlen = 0;
	}

	/* for version >= 1.6, ACK with the packet mode we support */
	if (vio_version_after_eq(vio, 1, 6)) {
		pkt->xfer_mode = VIO_NEW_DRING_MODE;
		pkt->options = VIO_TX_DRING;
	}

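	/* NACK unless the (normalized) transfer mode includes the new
	 * dring mode, the address type is an Ethernet MAC, and the
	 * peer's MTU matches what we negotiated above.
	 */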
	if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != localmtu) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void) vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	} else {
		viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
		       "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
		       "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
		       pkt->xfer_mode, pkt->addr_type,
		       (unsigned long long)pkt->addr,
		       pkt->ack_freq, pkt->plnk_updt, pkt->options,
		       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
		       pkt->ipv4_lso_maxlen);

		pkt->tag.stype = VIO_SUBTYPE_ACK;

		return vio_ldc_send(vio, pkt, sizeof(*pkt));
	}
}

static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}

int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}
EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common);

void sunvnet_handshake_complete_common(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;
}
EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common);

/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly. These 6 bytes are not in the descriptor
 * length, they are simply implied. This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
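
/* Example: a 1500-byte frame is allocated with 1500 + VNET_PACKET_SKIP(6)
 * + 8 + 8 bytes of space -- up to 7 bytes may be consumed realigning
 * skb->data to an 8-byte boundary, and up to 7 more by rounding the
 * copy length up to a multiple of 8.
 */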
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8);
	unsigned long addr, off;

	if (unlikely(!skb))
		return NULL;

	addr = (unsigned long) skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}

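/* Compute a full TCP/UDP checksum in software: zero the checksum
 * field, sum the transport payload with skb_checksum(), then fold in
 * the IPv4 pseudo-header via csum_tcpudp_magic(). Non-IP and
 * non-TCP/UDP packets are left untouched.
 */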
static inline void vnet_fullcsum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int offset = skb_transport_offset(skb);

	if (skb->protocol != htons(ETH_P_IP))
		return;
	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_level = 1;
	skb->csum = 0;
	if (iph->protocol == IPPROTO_TCP) {
		struct tcphdr *ptcp = tcp_hdr(skb);

		ptcp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_TCP,
						skb->csum);
	} else if (iph->protocol == IPPROTO_UDP) {
		struct udphdr *pudp = udp_hdr(skb);

		pudp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_UDP,
						skb->csum);
	}
}

static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
	struct net_device *dev = port->vp->dev;
	unsigned int len = desc->size;
	unsigned int copy_len;
	struct sk_buff *skb;
	int maxlen;
	int err;

	err = -EMSGSIZE;
	if (port->tso && port->tsolen > port->rmtu)
		maxlen = port->tsolen;
	else
		maxlen = port->rmtu;
	if (unlikely(len < ETH_ZLEN || len > maxlen)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

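	/* Round the copy length up to a multiple of 8 covering the
	 * 6-byte VNET_PACKET_SKIP plus the frame, as the LDC copy
	 * interface requires; the extra bytes are trimmed off below.
	 */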
	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       desc->cookies, desc->ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	if (vio_version_after_eq(&port->vio, 1, 8)) {
		struct vio_net_dext *dext = vio_net_ext(desc);

		skb_reset_network_header(skb);

		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);

				iph->check = 0;
				ip_send_check(iph);
			}
		}
		if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
		    skb->ip_summed == CHECKSUM_NONE) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);
				int ihl = iph->ihl * 4;

				skb_reset_transport_header(skb);
				skb_set_transport_header(skb, ihl);
				vnet_fullcsum(skb);
			}
		}
		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_level = 0;
			if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
				skb->csum_level = 1;
		}
	}

	skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	napi_gro_receive(&port->napi, skb);
	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}

static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_ACK,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = end,
		.state = vio_dring_state,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
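	/* Retry on -EAGAIN with exponential backoff: the delay doubles
	 * from 1us up to a 128us cap, giving up after VNET_MAX_RETRIES
	 * attempts.
	 */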
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES) {
			pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
				port->raddr[0], port->raddr[1],
				port->raddr[2], port->raddr[3],
				port->raddr[4], port->raddr[5]);
			break;
		}
	} while (err == -EAGAIN);

	if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
		port->stop_rx_idx = end;
		port->stop_rx = true;
	} else {
		port->stop_rx_idx = 0;
		port->stop_rx = false;
	}

	return err;
}

static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}

static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	BUG_ON(desc == NULL);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	dma_rmb();

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	err = vnet_rx_one(port, desc);
	if (err == -ECONNRESET)
		return err;
	trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
			  index, desc->hdr.ack);
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}

static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end, int *npkts, int budget)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;
	bool send_ack = true;

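	/* An end index of (u32)-1 means "no explicit end": walk the
	 * ring from start up to the entry before it. Otherwise advance
	 * end by one so the loop below can treat it as exclusive.
	 */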
	end = (end == (u32) -1) ? vio_dring_prev(dr, start)
				: vio_dring_next(dr, end);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);

		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		(*npkts)++;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = vio_dring_next(dr, start);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
		if ((*npkts) >= budget) {
			send_ack = false;
			break;
		}
	}
	if (unlikely(ack_start == -1))
		ack_start = ack_end = vio_dring_prev(dr, start);
	if (send_ack) {
		port->napi_resume = false;
		trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
					       port->vio._peer_sid,
					       ack_end, *npkts);
		return vnet_send_ack(port, dr, ack_start, ack_end,
				     VIO_DRING_STOPPED);
	} else {
		trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
						port->vio._peer_sid,
						ack_end, *npkts);
		port->napi_resume = true;
		port->napi_stop_idx = ack_end;
		return 1;
	}
}

static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
		   int budget)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
		       pkt->seq, dr->rcv_nxt);
		return 0;
	}

	if (!port->napi_resume)
		dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
			    npkts, budget);
}

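/* Return whether index "end" still lies in the not-yet-consumed
 * window [dr->cons, dr->prod) of the TX ring.
 */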
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = vio_dring_next(dr, idx);
	}
	return found;
}

static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	struct vnet *vp;
	u32 end;
	struct vio_net_desc *desc;
	struct netdev_queue *txq;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	vp = port->vp;
	dev = vp->dev;
	netif_tx_lock(dev);
	if (unlikely(!idx_is_pending(dr, end))) {
		netif_tx_unlock(dev);
		return 0;
	}

	/* sync for race conditions with vnet_start_xmit() and tell xmit it
	 * is time to send a trigger.
	 */
	trace_vnet_rx_stopped_ack(port->vio._local_sid,
				  port->vio._peer_sid, end);
	dr->cons = vio_dring_next(dr, end);
	desc = vio_dring_entry(dr, dr->cons);
	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
		/* vnet_start_xmit() just populated this dring but missed
		 * sending the "start" LDC message to the consumer.
		 * Send a "start" trigger on its behalf.
		 */
		if (__vnet_tx_trigger(port, dr->cons) > 0)
			port->start_cons = false;
		else
			port->start_cons = true;
	} else {
		port->start_cons = true;
	}
	netif_tx_unlock(dev);

	txq = netdev_get_tx_queue(dev, port->q_index);
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
		       port->vp->dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

/* Got back a STOPPED LDC message on port. If the queue is stopped,
 * wake it up so that we'll send out another START message at the
 * next TX.
 */
static void maybe_tx_wakeup(struct vnet_port *port)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
	__netif_tx_lock(txq, smp_processor_id());
	if (likely(netif_tx_queue_stopped(txq))) {
		struct vio_dring_state *dr;

		dr = &port->vio.drings[VIO_DRIVER_TX_RING];
		netif_tx_wake_queue(txq);
	}
	__netif_tx_unlock(txq);
}

static inline bool port_is_up(struct vnet_port *vnet)
{
	struct vio_driver_state *vio = &vnet->vio;

	return !!(vio->hs_state & VIO_HS_COMPLETE);
}

static int vnet_event_napi(struct vnet_port *port, int budget)
{
	struct vio_driver_state *vio = &port->vio;
	int tx_wakeup, err;
	int npkts = 0;
	int event = (port->rx_event & LDC_EVENT_RESET);

ldc_ctrl:
	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);

		if (event == LDC_EVENT_RESET) {
			vnet_port_reset(port);
			vio_port_up(vio);
		}
		port->rx_event = 0;
		return 0;
	}
	/* We may have multiple LDC events in rx_event. Unroll send_events() */
	event = (port->rx_event & LDC_EVENT_UP);
	port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP);
	if (event == LDC_EVENT_UP)
		goto ldc_ctrl;
	event = port->rx_event;
	if (!(event & LDC_EVENT_DATA_READY))
		return 0;

	/* we don't expect any other bits than RESET, UP, DATA_READY */
	BUG_ON(event != LDC_EVENT_DATA_READY);

	tx_wakeup = err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		if (port->napi_resume) {
			struct vio_dring_data *pkt =
				(struct vio_dring_data *)&msgbuf;
			struct vio_dring_state *dr =
				&port->vio.drings[VIO_DRIVER_RX_RING];

			pkt->tag.type = VIO_TYPE_DATA;
			pkt->tag.stype = VIO_SUBTYPE_INFO;
			pkt->tag.stype_env = VIO_DRING_DATA;
			pkt->seq = dr->rcv_nxt;
			pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
			pkt->end_idx = -1;
			goto napi_resume;
		}
		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;
napi_resume:
		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				if (!port_is_up(port)) {
					/* failures like handshake_failure()
					 * may have cleaned up dring, but
					 * NAPI polling may bring us here.
					 */
					err = -ECONNRESET;
					break;
				}
				err = vnet_rx(port, &msgbuf, &npkts, budget);
				if (npkts >= budget)
					break;
				if (npkts == 0)
					break;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port);
	return npkts;
}

int sunvnet_poll_common(struct napi_struct *napi, int budget)
{
	struct vnet_port *port = container_of(napi, struct vnet_port, napi);
	struct vio_driver_state *vio = &port->vio;
	int processed = vnet_event_napi(port, budget);

	if (processed < budget) {
		napi_complete(napi);
		port->rx_event &= ~LDC_EVENT_DATA_READY;
		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
	}
	return processed;
}
EXPORT_SYMBOL_GPL(sunvnet_poll_common);

void sunvnet_event_common(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;

	port->rx_event |= event;
	vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
	napi_schedule(&port->napi);
}
EXPORT_SYMBOL_GPL(sunvnet_event_common);

static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = (u32) -1,
	};
	int err, delay;
	int retries = 0;

	if (port->stop_rx) {
		trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
						  port->vio._peer_sid,
						  port->stop_rx_idx, -1);
		err = vnet_send_ack(port,
				    &port->vio.drings[VIO_DRIVER_RX_RING],
				    port->stop_rx_idx, -1,
				    VIO_DRING_STOPPED);
		if (err <= 0)
			return err;
	}

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);
	trace_vnet_tx_trigger(port->vio._local_sid,
			      port->vio._peer_sid, start, err);

	return err;
}

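/* Pick the egress port for an skb: first try a unicast match on the
 * destination MAC via the port hash, then fall back to the first
 * live switch port, which handles unknown/flooded destinations.
 */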
static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	unsigned int hash = vnet_hashfn(skb->data);
	struct hlist_head *hp = &vp->port_hash[hash];
	struct vnet_port *port;

	hlist_for_each_entry_rcu(port, hp, hash) {
		if (!port_is_up(port))
			continue;
		if (ether_addr_equal(port->raddr, skb->data))
			return port;
	}
	list_for_each_entry_rcu(port, &vp->port_list, list) {
		if (!port->switch_port)
			continue;
		if (!port_is_up(port))
			continue;
		return port;
	}
	return NULL;
}

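/* Walk the TX ring backward from dr->prod, counting descriptors that
 * are still READY as pending and chaining the skbs of completed
 * (DONE) entries via skb->next so the caller can free them outside
 * the TX lock.
 */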
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
					  unsigned *pending)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *skb = NULL;
	int i, txi;

	*pending = 0;

	txi = dr->prod;
	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		--txi;
		if (txi < 0)
			txi = VNET_TX_RING_SIZE - 1;

		d = vio_dring_entry(dr, txi);

		if (d->hdr.state == VIO_DESC_READY) {
			(*pending)++;
			continue;
		}
		if (port->tx_bufs[txi].skb) {
			if (d->hdr.state != VIO_DESC_DONE)
				pr_notice("invalid ring buffer state %d\n",
					  d->hdr.state);
			BUG_ON(port->tx_bufs[txi].skb->next);

			port->tx_bufs[txi].skb->next = skb;
			skb = port->tx_bufs[txi].skb;
			port->tx_bufs[txi].skb = NULL;

			ldc_unmap(port->vio.lp,
				  port->tx_bufs[txi].cookies,
				  port->tx_bufs[txi].ncookies);
		} else if (d->hdr.state == VIO_DESC_FREE)
			break;
		d->hdr.state = VIO_DESC_FREE;
	}
	return skb;
}

static inline void vnet_free_skbs(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		dev_kfree_skb(skb);
		skb = next;
	}
}

void sunvnet_clean_timer_expire_common(unsigned long port0)
{
	struct vnet_port *port = (struct vnet_port *)port0;
	struct sk_buff *freeskbs;
	unsigned pending;

	netif_tx_lock(port->vp->dev);
	freeskbs = vnet_clean_tx_ring(port, &pending);
	netif_tx_unlock(port->vp->dev);

	vnet_free_skbs(freeskbs);

	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else
		del_timer(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);

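/* Map the skb for LDC transfer: the linear header (padded to at least
 * ETH_ZLEN, prefixed by VNET_PACKET_SKIP and rounded for 8-byte
 * alignment) is mapped first, then each page fragment, accumulating
 * transfer cookies. Returns the cookie count or a negative errno.
 */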
static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
			       struct ldc_trans_cookie *cookies, int ncookies,
			       unsigned int map_perm)
{
	int i, nc, err, blen;

	/* header */
	blen = skb_headlen(skb);
	if (blen < ETH_ZLEN)
		blen = ETH_ZLEN;
	blen += VNET_PACKET_SKIP;
	blen += 8 - (blen & 7);

	err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies,
			     ncookies, map_perm);
	if (err < 0)
		return err;
	nc = err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		if (nc < ncookies) {
			vaddr = kmap_atomic(skb_frag_page(f));
			blen = skb_frag_size(f);
			blen += 8 - (blen & 7);
			err = ldc_map_single(lp, vaddr + f->page_offset,
					     blen, cookies + nc, ncookies - nc,
					     map_perm);
			kunmap_atomic(vaddr);
		} else {
			err = -EMSGSIZE;
		}

		if (err < 0) {
			ldc_unmap(lp, cookies, nc);
			return err;
		}
		nc += err;
	}
	return nc;
}

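/* Reshape the skb into a form vnet_skb_map() can handle: if skb->data
 * is not offset VNET_PACKET_SKIP bytes past an 8-byte boundary, if
 * head/tail room is short, or if fragments are misaligned or exceed
 * the cookie budget, copy the packet into a freshly aligned skb,
 * recomputing any pending partial checksum along the way.
 */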
static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
	struct sk_buff *nskb;
	int i, len, pad, docopy;

	len = skb->len;
	pad = 0;
	if (len < ETH_ZLEN) {
		pad += ETH_ZLEN - skb->len;
		len += pad;
	}
	len += VNET_PACKET_SKIP;
	pad += 8 - (len & 7);

	/* make sure we have enough cookies and alignment in every frag */
	docopy = skb_shinfo(skb)->nr_frags >= ncookies;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		docopy |= f->page_offset & 7;
	}
	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
	    skb_tailroom(skb) < pad ||
	    skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
		int start = 0, offset;
		__wsum csum;

		len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
		nskb = alloc_and_align_skb(skb->dev, len);
		if (nskb == NULL) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_reserve(nskb, VNET_PACKET_SKIP);

		nskb->protocol = skb->protocol;
		offset = skb_mac_header(skb) - skb->data;
		skb_set_mac_header(nskb, offset);
		offset = skb_network_header(skb) - skb->data;
		skb_set_network_header(nskb, offset);
		offset = skb_transport_header(skb) - skb->data;
		skb_set_transport_header(nskb, offset);

		offset = 0;
		nskb->csum_offset = skb->csum_offset;
		nskb->ip_summed = skb->ip_summed;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			start = skb_checksum_start_offset(skb);
		if (start) {
			struct iphdr *iph = ip_hdr(nskb);
			int offset = start + nskb->csum_offset;

			if (skb_copy_bits(skb, 0, nskb->data, start)) {
				dev_kfree_skb(nskb);
				dev_kfree_skb(skb);
				return NULL;
			}
			*(__sum16 *)(skb->data + offset) = 0;
			csum = skb_copy_and_csum_bits(skb, start,
						      nskb->data + start,
						      skb->len - start, 0);
			if (iph->protocol == IPPROTO_TCP ||
			    iph->protocol == IPPROTO_UDP) {
				csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - start,
							 iph->protocol, csum);
			}
			*(__sum16 *)(nskb->data + offset) = csum;

			nskb->ip_summed = CHECKSUM_NONE;
		} else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
			dev_kfree_skb(nskb);
			dev_kfree_skb(skb);
			return NULL;
		}
		(void)skb_put(nskb, skb->len);
		if (skb_is_gso(skb)) {
			skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
			skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
		}
		nskb->queue_mapping = skb->queue_mapping;
		dev_kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

u16 sunvnet_select_queue_common(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv, select_queue_fallback_t fallback)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = __tx_port_find(vp, skb);

	if (port == NULL)
		return 0;
	return port->q_index;
}
EXPORT_SYMBOL_GPL(sunvnet_select_queue_common);

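/* The peer advertised an LSO limit (port->tsolen) smaller than this
 * GSO skb, so re-segment in software: adjust gso_size so each
 * segment's payload fits in tsolen minus the MAC/IP/transport
 * headers, run skb_gso_segment() with TSO masked out of the device
 * features, and transmit each resulting segment individually.
 */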
static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
{
	struct net_device *dev = port->vp->dev;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *segs;
	int maclen, datalen;
	int status;
	int gso_size, gso_type, gso_segs;
	int hlen = skb_transport_header(skb) - skb_mac_header(skb);
	int proto = IPPROTO_IP;

	if (skb->protocol == htons(ETH_P_IP))
		proto = ip_hdr(skb)->protocol;
	else if (skb->protocol == htons(ETH_P_IPV6))
		proto = ipv6_hdr(skb)->nexthdr;

	if (proto == IPPROTO_TCP)
		hlen += tcp_hdr(skb)->doff * 4;
	else if (proto == IPPROTO_UDP)
		hlen += sizeof(struct udphdr);
	else {
		pr_err("vnet_handle_offloads GSO with unknown transport "
		       "protocol %d tproto %d\n", skb->protocol, proto);
		hlen = 128; /* XXX */
	}
	datalen = port->tsolen - hlen;

	gso_size = skb_shinfo(skb)->gso_size;
	gso_type = skb_shinfo(skb)->gso_type;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (port->tso && gso_size < datalen)
		gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);

	if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, port->q_index);
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
			return NETDEV_TX_BUSY;
		netif_tx_wake_queue(txq);
	}

	maclen = skb_network_header(skb) - skb_mac_header(skb);
	skb_pull(skb, maclen);

	if (port->tso && gso_size < datalen) {
		if (skb_unclone(skb, GFP_ATOMIC))
			goto out_dropped;

		/* segment to TSO size */
		skb_shinfo(skb)->gso_size = datalen;
		skb_shinfo(skb)->gso_segs = gso_segs;
	}
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto out_dropped;

	skb_push(skb, maclen);
	skb_reset_mac_header(skb);

	status = 0;
	while (segs) {
		struct sk_buff *curr = segs;

		segs = segs->next;
		curr->next = NULL;
		if (port->tso && curr->len > dev->mtu) {
			skb_shinfo(curr)->gso_size = gso_size;
			skb_shinfo(curr)->gso_type = gso_type;
			skb_shinfo(curr)->gso_segs =
				DIV_ROUND_UP(curr->len - hlen, gso_size);
		} else {
			skb_shinfo(curr)->gso_size = 0;
		}

		skb_push(curr, maclen);
		skb_reset_mac_header(curr);
		memcpy(skb_mac_header(curr), skb_mac_header(skb),
		       maclen);
		curr->csum_start = skb_transport_header(curr) - curr->head;
		if (ip_hdr(curr)->protocol == IPPROTO_TCP)
			curr->csum_offset = offsetof(struct tcphdr, check);
		else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
			curr->csum_offset = offsetof(struct udphdr, check);

		if (!(status & NETDEV_TX_MASK))
			status = sunvnet_start_xmit_common(curr, dev);
		if (status & NETDEV_TX_MASK)
			dev_kfree_skb_any(curr);
	}

	if (!(status & NETDEV_TX_MASK))
		dev_kfree_skb_any(skb);
	return status;
out_dropped:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = NULL;
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned int len;
	struct sk_buff *freeskbs = NULL;
	int i, err, txi;
	unsigned pending = 0;
	struct netdev_queue *txq;

	rcu_read_lock();
	port = __tx_port_find(vp, skb);
	if (unlikely(!port)) {
		rcu_read_unlock();
		goto out_dropped;
	}

	if (skb_is_gso(skb) && skb->len > port->tsolen) {
		err = vnet_handle_offloads(port, skb);
		rcu_read_unlock();
		return err;
	}

	if (!skb_is_gso(skb) && skb->len > port->rmtu) {
		unsigned long localmtu = port->rmtu - ETH_HLEN;

		if (vio_version_after_eq(&port->vio, 1, 3))
			localmtu -= VLAN_HLEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct flowi4 fl4;
			struct rtable *rt = NULL;

			memset(&fl4, 0, sizeof(fl4));
			fl4.flowi4_oif = dev->ifindex;
			fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
			fl4.daddr = ip_hdr(skb)->daddr;
			fl4.saddr = ip_hdr(skb)->saddr;

			rt = ip_route_output_key(dev_net(dev), &fl4);
			rcu_read_unlock();
			if (!IS_ERR(rt)) {
				skb_dst_set(skb, &rt->dst);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED,
					  htonl(localmtu));
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
		goto out_dropped;
	}

	skb = vnet_skb_shape(skb, 2);

	if (unlikely(!skb))
		goto out_dropped;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vnet_fullcsum(skb);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	i = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, i);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
			dev->stats.tx_errors++;
		}
		rcu_read_unlock();
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	txi = dr->prod;

	freeskbs = vnet_clean_tx_ring(port, &pending);

	BUG_ON(port->tx_bufs[txi].skb);

	len = skb->len;
	if (len < ETH_ZLEN)
		len = ETH_ZLEN;

	err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
			   (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
	if (err < 0) {
		netdev_info(dev, "tx buffer map error %d\n", err);
		goto out_dropped;
	}

	port->tx_bufs[txi].skb = skb;
	skb = NULL;
	port->tx_bufs[txi].ncookies = err;

	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
	 * the protocol itself does not require it as long as the peer
	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
	 *
	 * An ACK for every packet in the ring is expensive as the
	 * sending of LDC messages is slow and affects performance.
	 */
	d->hdr.ack = VIO_ACK_DISABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[txi].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[txi].cookies[i];
	if (vio_version_after_eq(&port->vio, 1, 7)) {
		struct vio_net_dext *dext = vio_net_ext(d);

		memset(dext, 0, sizeof(*dext));
		if (skb_is_gso(port->tx_bufs[txi].skb)) {
			dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
						->gso_size;
			dext->flags |= VNET_PKT_IPV4_LSO;
		}
		if (vio_version_after_eq(&port->vio, 1, 8) &&
		    !port->switch_port) {
			dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
			dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
		}
	}

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	dma_wmb();

	d->hdr.state = VIO_DESC_READY;

	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
	 * to notify the consumer that some descriptors are READY.
	 * After that "start" trigger, no additional triggers are needed until
	 * a DRING_STOPPED is received from the consumer. The dr->cons field
	 * (set up by vnet_ack()) has the value of the next dring index
	 * that has not yet been ack-ed. We send a "start" trigger here
	 * if, and only if, start_cons is true (reset it afterward). Conversely,
	 * vnet_ack() should check if the dring corresponding to cons
	 * is marked READY, but start_cons was false.
	 * If so, vnet_ack() should send out the missed "start" trigger.
	 *
	 * Note that the dma_wmb() above makes sure the cookies et al. are
	 * not globally visible before the VIO_DESC_READY, and that the
	 * stores are ordered correctly by the compiler. The consumer will
	 * not proceed until the VIO_DESC_READY is visible assuring that
	 * the consumer does not observe anything related to descriptors
	 * out of order. The HV trap from the LDC start trigger is the
	 * producer to consumer announcement that work is available to the
	 * consumer.
	 */
	if (!port->start_cons) { /* previous trigger suffices */
		trace_vnet_skip_tx_trigger(port->vio._local_sid,
					   port->vio._peer_sid, dr->cons);
		goto ldc_start_done;
	}

	err = __vnet_tx_trigger(port, dr->cons);
	if (unlikely(err < 0)) {
		netdev_info(dev, "TX trigger error %d\n", err);
		d->hdr.state = VIO_DESC_FREE;
		skb = port->tx_bufs[txi].skb;
		port->tx_bufs[txi].skb = NULL;
		dev->stats.tx_carrier_errors++;
		goto out_dropped;
	}

ldc_start_done:
	port->start_cons = false;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_tx_wake_queue(txq);
	}

	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
	rcu_read_unlock();

	vnet_free_skbs(freeskbs);

	return NETDEV_TX_OK;

out_dropped:
	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else if (port)
		del_timer(&port->clean_timer);
	if (port)
		rcu_read_unlock();
	if (skb)
		dev_kfree_skb(skb);
	vnet_free_skbs(freeskbs);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);

void sunvnet_tx_timeout_common(struct net_device *dev)
{
	/* XXX Implement me XXX */
}
EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common);

int sunvnet_open_common(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_open_common);

int sunvnet_close_common(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_close_common);

static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (ether_addr_equal(m->addr, addr))
			return m;
	}
	return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, ha->addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, ha->addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}

static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}

	info.set = 0;

	n_addrs = 0;
	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}

void sunvnet_set_rx_mode_common(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &vp->port_list, list) {
		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
			break;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common);

int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > 65535)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_change_mtu_common);

int sunvnet_set_mac_addr_common(struct net_device *dev, void *p)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common);

void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (dr->base == NULL)
		return;

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		struct vio_net_desc *d;
		void *skb = port->tx_bufs[i].skb;

		if (!skb)
			continue;

		d = vio_dring_entry(dr, i);

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);
		dev_kfree_skb(skb);
		port->tx_bufs[i].skb = NULL;
		d->hdr.state = VIO_DESC_FREE;
	}
	ldc_free_exp_dring(port->vio.lp, dr->base,
			   (dr->entry_size * dr->num_entries),
			   dr->cookies, dr->ncookies);
	dr->base = NULL;
	dr->entry_size = 0;
	dr->num_entries = 0;
	dr->pending = 0;
	dr->ncookies = 0;
}
EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);

static void vnet_port_reset(struct vnet_port *port)
{
	del_timer(&port->clean_timer);
	sunvnet_port_free_tx_bufs_common(port);
	port->rmtu = 0;
	port->tso = true;
	port->tsolen = 0;
}

static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len, elen;
	int i, err, ncookies;
	void *dring;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

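	/* Each ring entry holds the descriptor plus room for two
	 * transfer cookies; for v1.7+ the vio_net_dext extension
	 * (LSO/checksum flags) is appended to every entry.
	 */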
	elen = sizeof(struct vio_net_desc) +
	       sizeof(struct ldc_trans_cookie) * 2;
	if (vio_version_after_eq(&port->vio, 1, 7))
		elen += sizeof(struct vio_net_dext);
	len = VNET_TX_RING_SIZE * elen;

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = elen;
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	port->start_cons = true; /* need an initial trigger */
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		d = vio_dring_entry(dr, i);
		d->hdr.state = VIO_DESC_FREE;
	}
	return 0;

err_out:
	sunvnet_port_free_tx_bufs_common(port);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
void sunvnet_poll_controller_common(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);
		napi_schedule(&port->napi);
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}
EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
#endif

void sunvnet_port_add_txq_common(struct vnet_port *port)
{
	struct vnet *vp = port->vp;
	int n;

	n = vp->nports++;
	n = n & (VNET_MAX_TXQS - 1);
	port->q_index = n;
	netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);

void sunvnet_port_rm_txq_common(struct vnet_port *port)
{
	port->vp->nports--;
	netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);