drivers/net/xen-netback/netback.c
1 /*
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
4 * by any operating system that implements a compatible front end. A
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
7 *
8 * Copyright (c) 2002-2005, K A Fraser
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35 #include "common.h"
36
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40 #include <linux/highmem.h>
41
42 #include <net/tcp.h>
43
44 #include <xen/xen.h>
45 #include <xen/events.h>
46 #include <xen/interface/memory.h>
47
48 #include <asm/xen/hypercall.h>
49 #include <asm/xen/page.h>
50
51 /* Provide an option to disable split event channels at load time, as
52 * event channels are a limited resource. Split event channels are
53 * enabled by default.
54 */
55 bool separate_tx_rx_irq = true;
56 module_param(separate_tx_rx_irq, bool, 0644);
57
58 /* When the guest ring is filled up, the qdisc queues packets for us, but we
59 * have to time them out; otherwise other guests' packets can get stuck there.
60 */
61 unsigned int rx_drain_timeout_msecs = 10000;
62 module_param(rx_drain_timeout_msecs, uint, 0444);
63 unsigned int rx_drain_timeout_jiffies;
64
65 /*
66 * This is the maximum number of slots an skb can use. If a guest sends
67 * an skb which exceeds this limit, it is considered malicious.
68 */
69 #define FATAL_SKB_SLOTS_DEFAULT 20
70 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
71 module_param(fatal_skb_slots, uint, 0444);
72
73 static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
74 u8 status);
75
76 static void make_tx_response(struct xenvif *vif,
77 struct xen_netif_tx_request *txp,
78 s8 st);
79
80 static inline int tx_work_todo(struct xenvif *vif);
81 static inline int rx_work_todo(struct xenvif *vif);
82
83 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
84 u16 id,
85 s8 st,
86 u16 offset,
87 u16 size,
88 u16 flags);
89
90 static inline unsigned long idx_to_pfn(struct xenvif *vif,
91 u16 idx)
92 {
93 return page_to_pfn(vif->mmap_pages[idx]);
94 }
95
96 static inline unsigned long idx_to_kaddr(struct xenvif *vif,
97 u16 idx)
98 {
99 return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
100 }
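/* Note: mmap_pages[] holds the local pages into which guest tx grants
 * are mapped, so idx_to_kaddr() yields a kernel virtual address for the
 * data backing pending request 'idx'.
 */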
101
102 #define callback_param(vif, pending_idx) \
103 (vif->pending_tx_info[pending_idx].callback_struct)
104
105 /* Find the containing VIF from a pointer into its pending_tx_info array.
106 */
107 static inline struct xenvif *ubuf_to_vif(struct ubuf_info *ubuf)
108 {
109 u16 pending_idx = ubuf->desc;
110 struct pending_tx_info *temp =
111 container_of(ubuf, struct pending_tx_info, callback_struct);
112 return container_of(temp - pending_idx,
113 struct xenvif,
114 pending_tx_info[0]);
115 }
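/* How the pointer arithmetic above works: ubuf points at the
 * callback_struct embedded in pending_tx_info[pending_idx], so the first
 * container_of() recovers that array element; stepping back pending_idx
 * elements lands on &pending_tx_info[0], from which the second
 * container_of() recovers the enclosing struct xenvif.
 */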
116
117 /* This is a minimum size for the linear area to avoid lots of
118 * calls to __pskb_pull_tail() as we set up checksum offsets. The
119 * value 128 was chosen as it covers all IPv4 and most likely
120 * IPv6 headers.
121 */
122 #define PKT_PROT_LEN 128
123
124 static u16 frag_get_pending_idx(skb_frag_t *frag)
125 {
126 return (u16)frag->page_offset;
127 }
128
129 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
130 {
131 frag->page_offset = pending_idx;
132 }
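/* While a tx request is in flight the frag's page pointer is not yet
 * meaningful, so its page_offset field is borrowed to carry the pending
 * ring index; xenvif_fill_frags() later rewrites the frag with the real
 * page, offset and size taken from the corresponding tx request.
 */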
133
134 static inline pending_ring_idx_t pending_index(unsigned i)
135 {
136 return i & (MAX_PENDING_REQS-1);
137 }
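/* This relies on MAX_PENDING_REQS being a power of two (256 in
 * common.h), so the mask is equivalent to i % MAX_PENDING_REQS without
 * a division, e.g. pending_index(260) == 4 for a 256-entry ring.
 */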
138
139 bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
140 {
141 RING_IDX prod, cons;
142
143 do {
144 prod = vif->rx.sring->req_prod;
145 cons = vif->rx.req_cons;
146
147 if (prod - cons >= needed)
148 return true;
149
150 vif->rx.sring->req_event = prod + 1;
151
152 /* Make sure event is visible before we check prod
153 * again.
154 */
155 mb();
156 } while (vif->rx.sring->req_prod != prod);
157
158 return false;
159 }
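/* The loop above closes a wakeup race: if the frontend posts more
 * requests after we sample req_prod but before we write req_event, the
 * notification for them could be lost. Re-reading req_prod after the
 * barrier catches that window, so we either see the new requests or are
 * guaranteed a future event.
 */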
160
161 /*
162 * Returns true if we should start a new receive buffer instead of
163 * adding 'size' bytes to a buffer which currently contains 'offset'
164 * bytes.
165 */
166 static bool start_new_rx_buffer(int offset, unsigned long size, int head)
167 {
168 /* simple case: we have completely filled the current buffer. */
169 if (offset == MAX_BUFFER_OFFSET)
170 return true;
171
172 /*
173 * complex case: start a fresh buffer if the current frag
174 * would overflow the current buffer but only if:
175 * (i) this frag would fit completely in the next buffer
176 * and (ii) there is already some data in the current buffer
177 * and (iii) this is not the head buffer.
178 *
179 * Where:
180 * - (i) stops us splitting a frag into two copies
181 * unless the frag is too large for a single buffer.
182 * - (ii) stops us from leaving a buffer pointlessly empty.
183 * - (iii) stops us leaving the first buffer
184 * empty. Strictly speaking this is already covered
185 * by (ii) but is explicitly checked because
186 * netfront relies on the first buffer being
187 * non-empty and can crash otherwise.
188 *
189 * This means we will effectively linearise small
190 * frags but do not needlessly split large buffers
191 * into multiple copies, tending to give large frags
192 * their own buffers as before.
193 */
194 BUG_ON(size > MAX_BUFFER_OFFSET);
195 if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
196 return true;
197
198 return false;
199 }
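/* Worked example, assuming MAX_BUFFER_OFFSET == PAGE_SIZE == 4096: a
 * 2000-byte frag arriving when offset == 3000 would overflow the
 * current buffer (3000 + 2000 > 4096) but fits a fresh one on its own,
 * so for a non-head buffer we return true and start a new buffer; the
 * same frag at offset == 0 is simply appended.
 */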
200
201 struct netrx_pending_operations {
202 unsigned copy_prod, copy_cons;
203 unsigned meta_prod, meta_cons;
204 struct gnttab_copy *copy;
205 struct xenvif_rx_meta *meta;
206 int copy_off;
207 grant_ref_t copy_gref;
208 };
209
210 static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
211 struct netrx_pending_operations *npo)
212 {
213 struct xenvif_rx_meta *meta;
214 struct xen_netif_rx_request *req;
215
216 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
217
218 meta = npo->meta + npo->meta_prod++;
219 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
220 meta->gso_size = 0;
221 meta->size = 0;
222 meta->id = req->id;
223
224 npo->copy_off = 0;
225 npo->copy_gref = req->gref;
226
227 return meta;
228 }
229
230 /*
231 * Set up the grant copy operations for this fragment. (This path uses
232 * grant copy only; there is no page-flipping mode here.)
233 */
234 static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
235 struct netrx_pending_operations *npo,
236 struct page *page, unsigned long size,
237 unsigned long offset, int *head,
238 struct xenvif *foreign_vif,
239 grant_ref_t foreign_gref)
240 {
241 struct gnttab_copy *copy_gop;
242 struct xenvif_rx_meta *meta;
243 unsigned long bytes;
244 int gso_type = XEN_NETIF_GSO_TYPE_NONE;
245
246 /* Data must not cross a page boundary. */
247 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
248
249 meta = npo->meta + npo->meta_prod - 1;
250
251 /* Skip unused frames from start of page */
252 page += offset >> PAGE_SHIFT;
253 offset &= ~PAGE_MASK;
254
255 while (size > 0) {
256 BUG_ON(offset >= PAGE_SIZE);
257 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
258
259 bytes = PAGE_SIZE - offset;
260
261 if (bytes > size)
262 bytes = size;
263
264 if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
265 /*
266 * Netfront requires there to be some data in the head
267 * buffer.
268 */
269 BUG_ON(*head);
270
271 meta = get_next_rx_buffer(vif, npo);
272 }
273
274 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
275 bytes = MAX_BUFFER_OFFSET - npo->copy_off;
276
277 copy_gop = npo->copy + npo->copy_prod++;
278 copy_gop->flags = GNTCOPY_dest_gref;
279 copy_gop->len = bytes;
280
281 if (foreign_vif) {
282 copy_gop->source.domid = foreign_vif->domid;
283 copy_gop->source.u.ref = foreign_gref;
284 copy_gop->flags |= GNTCOPY_source_gref;
285 } else {
286 copy_gop->source.domid = DOMID_SELF;
287 copy_gop->source.u.gmfn =
288 virt_to_mfn(page_address(page));
289 }
290 copy_gop->source.offset = offset;
291
292 copy_gop->dest.domid = vif->domid;
293 copy_gop->dest.offset = npo->copy_off;
294 copy_gop->dest.u.ref = npo->copy_gref;
295
296 npo->copy_off += bytes;
297 meta->size += bytes;
298
299 offset += bytes;
300 size -= bytes;
301
302 /* Next frame */
303 if (offset == PAGE_SIZE && size) {
304 BUG_ON(!PageCompound(page));
305 page++;
306 offset = 0;
307 }
308
309 /* Leave a gap for the GSO descriptor. */
310 if (skb_is_gso(skb)) {
311 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
312 gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
313 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
314 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
315 }
316
317 if (*head && ((1 << gso_type) & vif->gso_mask))
318 vif->rx.req_cons++;
319
320 *head = 0; /* There must be something in this buffer now. */
321
322 }
323 }
324
325 /*
326 * Prepare an SKB to be transmitted to the frontend.
327 *
328 * This function is responsible for allocating grant operations, meta
329 * structures, etc.
330 *
331 * It returns the number of meta structures consumed. The number of
332 * ring slots used is always equal to the number of meta slots used
333 * plus the number of GSO descriptors used. Currently, we use either
334 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
335 * frontend-side LRO).
336 */
337 static int xenvif_gop_skb(struct sk_buff *skb,
338 struct netrx_pending_operations *npo)
339 {
340 struct xenvif *vif = netdev_priv(skb->dev);
341 int nr_frags = skb_shinfo(skb)->nr_frags;
342 int i;
343 struct xen_netif_rx_request *req;
344 struct xenvif_rx_meta *meta;
345 unsigned char *data;
346 int head = 1;
347 int old_meta_prod;
348 int gso_type;
349 struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
350 grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
351 struct xenvif *foreign_vif = NULL;
352
353 old_meta_prod = npo->meta_prod;
354
355 gso_type = XEN_NETIF_GSO_TYPE_NONE;
356 if (skb_is_gso(skb)) {
357 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
358 gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
359 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
360 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
361 }
362
363 /* Set up a GSO prefix descriptor, if necessary */
364 if ((1 << gso_type) & vif->gso_prefix_mask) {
365 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
366 meta = npo->meta + npo->meta_prod++;
367 meta->gso_type = gso_type;
368 meta->gso_size = skb_shinfo(skb)->gso_size;
369 meta->size = 0;
370 meta->id = req->id;
371 }
372
373 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
374 meta = npo->meta + npo->meta_prod++;
375
376 if ((1 << gso_type) & vif->gso_mask) {
377 meta->gso_type = gso_type;
378 meta->gso_size = skb_shinfo(skb)->gso_size;
379 } else {
380 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
381 meta->gso_size = 0;
382 }
383
384 meta->size = 0;
385 meta->id = req->id;
386 npo->copy_off = 0;
387 npo->copy_gref = req->gref;
388
389 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
390 (ubuf->callback == &xenvif_zerocopy_callback)) {
391 int i = 0;
392 foreign_vif = ubuf_to_vif(ubuf);
393
394 do {
395 u16 pending_idx = ubuf->desc;
396 foreign_grefs[i++] =
397 foreign_vif->pending_tx_info[pending_idx].req.gref;
398 ubuf = (struct ubuf_info *) ubuf->ctx;
399 } while (ubuf);
400 }
401
402 data = skb->data;
403 while (data < skb_tail_pointer(skb)) {
404 unsigned int offset = offset_in_page(data);
405 unsigned int len = PAGE_SIZE - offset;
406
407 if (data + len > skb_tail_pointer(skb))
408 len = skb_tail_pointer(skb) - data;
409
410 xenvif_gop_frag_copy(vif, skb, npo,
411 virt_to_page(data), len, offset, &head,
412 NULL,
413 0);
414 data += len;
415 }
416
417 for (i = 0; i < nr_frags; i++) {
418 xenvif_gop_frag_copy(vif, skb, npo,
419 skb_frag_page(&skb_shinfo(skb)->frags[i]),
420 skb_frag_size(&skb_shinfo(skb)->frags[i]),
421 skb_shinfo(skb)->frags[i].page_offset,
422 &head,
423 foreign_vif,
424 foreign_grefs[i]);
425 }
426
427 return npo->meta_prod - old_meta_prod;
428 }
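/* A note on the foreign_vif plumbing above: if this skb still carries
 * another guest's mapped tx grants (a guest-to-guest forward), the
 * grant copies must name the sending domain's grant references as the
 * source rather than DOMID_SELF addresses in our own memory.
 */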
429
430 /*
431 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
432 * used to set up the operations on the top of
433 * netrx_pending_operations, which have since been done. Check that
434 * they didn't give any errors and advance over them.
435 */
436 static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
437 struct netrx_pending_operations *npo)
438 {
439 struct gnttab_copy *copy_op;
440 int status = XEN_NETIF_RSP_OKAY;
441 int i;
442
443 for (i = 0; i < nr_meta_slots; i++) {
444 copy_op = npo->copy + npo->copy_cons++;
445 if (copy_op->status != GNTST_okay) {
446 netdev_dbg(vif->dev,
447 "Bad status %d from copy to DOM%d.\n",
448 copy_op->status, vif->domid);
449 status = XEN_NETIF_RSP_ERROR;
450 }
451 }
452
453 return status;
454 }
455
456 static void xenvif_add_frag_responses(struct xenvif *vif, int status,
457 struct xenvif_rx_meta *meta,
458 int nr_meta_slots)
459 {
460 int i;
461 unsigned long offset;
462
463 /* No fragments used */
464 if (nr_meta_slots <= 1)
465 return;
466
467 nr_meta_slots--;
468
469 for (i = 0; i < nr_meta_slots; i++) {
470 int flags;
471 if (i == nr_meta_slots - 1)
472 flags = 0;
473 else
474 flags = XEN_NETRXF_more_data;
475
476 offset = 0;
477 make_rx_response(vif, meta[i].id, status, offset,
478 meta[i].size, flags);
479 }
480 }
481
482 struct xenvif_rx_cb {
483 int meta_slots_used;
484 };
485
486 #define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
487
488 void xenvif_kick_thread(struct xenvif *vif)
489 {
490 wake_up(&vif->wq);
491 }
492
493 static void xenvif_rx_action(struct xenvif *vif)
494 {
495 s8 status;
496 u16 flags;
497 struct xen_netif_rx_response *resp;
498 struct sk_buff_head rxq;
499 struct sk_buff *skb;
500 LIST_HEAD(notify);
501 int ret;
502 unsigned long offset;
503 bool need_to_notify = false;
504
505 struct netrx_pending_operations npo = {
506 .copy = vif->grant_copy_op,
507 .meta = vif->meta,
508 };
509
510 skb_queue_head_init(&rxq);
511
512 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
513 RING_IDX max_slots_needed;
514 RING_IDX old_req_cons;
515 RING_IDX ring_slots_used;
516 int i;
517
518 /* We need a cheap worst-case estimate for the number of
519 * slots we'll use.
520 */
521
522 max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
523 skb_headlen(skb),
524 PAGE_SIZE);
525 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
526 unsigned int size;
527 unsigned int offset;
528
529 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
530 offset = skb_shinfo(skb)->frags[i].page_offset;
531
532 /* For a worst-case estimate we need to factor in
533 * the fragment page offset as this will affect the
534 * number of times xenvif_gop_frag_copy() will
535 * call start_new_rx_buffer().
536 */
537 max_slots_needed += DIV_ROUND_UP(offset + size,
538 PAGE_SIZE);
539 }
540
541 /* To avoid the estimate becoming too pessimal for some
542 * frontends that limit posted rx requests, cap the estimate
543 * at MAX_SKB_FRAGS.
544 */
545 if (max_slots_needed > MAX_SKB_FRAGS)
546 max_slots_needed = MAX_SKB_FRAGS;
547
548 /* We may need one more slot for GSO metadata */
549 if (skb_is_gso(skb) &&
550 (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
551 skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
552 max_slots_needed++;
553
554 /* If the skb may not fit then bail out now */
555 if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
556 skb_queue_head(&vif->rx_queue, skb);
557 need_to_notify = true;
558 vif->rx_last_skb_slots = max_slots_needed;
559 break;
560 } else
561 vif->rx_last_skb_slots = 0;
562
563 old_req_cons = vif->rx.req_cons;
564 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
565 ring_slots_used = vif->rx.req_cons - old_req_cons;
566
567 BUG_ON(ring_slots_used > max_slots_needed);
568
569 __skb_queue_tail(&rxq, skb);
570 }
571
572 BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
573
574 if (!npo.copy_prod)
575 goto done;
576
577 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
578 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
579
580 while ((skb = __skb_dequeue(&rxq)) != NULL) {
581
582 if ((1 << vif->meta[npo.meta_cons].gso_type) &
583 vif->gso_prefix_mask) {
584 resp = RING_GET_RESPONSE(&vif->rx,
585 vif->rx.rsp_prod_pvt++);
586
587 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
588
589 resp->offset = vif->meta[npo.meta_cons].gso_size;
590 resp->id = vif->meta[npo.meta_cons].id;
591 resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
592
593 npo.meta_cons++;
594 XENVIF_RX_CB(skb)->meta_slots_used--;
595 }
596
597
598 vif->dev->stats.tx_bytes += skb->len;
599 vif->dev->stats.tx_packets++;
600
601 status = xenvif_check_gop(vif,
602 XENVIF_RX_CB(skb)->meta_slots_used,
603 &npo);
604
605 if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
606 flags = 0;
607 else
608 flags = XEN_NETRXF_more_data;
609
610 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
611 flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
612 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
613 /* remote but checksummed. */
614 flags |= XEN_NETRXF_data_validated;
615
616 offset = 0;
617 resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
618 status, offset,
619 vif->meta[npo.meta_cons].size,
620 flags);
621
622 if ((1 << vif->meta[npo.meta_cons].gso_type) &
623 vif->gso_mask) {
624 struct xen_netif_extra_info *gso =
625 (struct xen_netif_extra_info *)
626 RING_GET_RESPONSE(&vif->rx,
627 vif->rx.rsp_prod_pvt++);
628
629 resp->flags |= XEN_NETRXF_extra_info;
630
631 gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
632 gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
633 gso->u.gso.pad = 0;
634 gso->u.gso.features = 0;
635
636 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
637 gso->flags = 0;
638 }
639
640 xenvif_add_frag_responses(vif, status,
641 vif->meta + npo.meta_cons + 1,
642 XENVIF_RX_CB(skb)->meta_slots_used);
643
644 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
645
646 need_to_notify |= !!ret;
647
648 npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
649 dev_kfree_skb(skb);
650 }
651
652 done:
653 if (need_to_notify)
654 notify_remote_via_irq(vif->rx_irq);
655 }
656
657 void xenvif_check_rx_xenvif(struct xenvif *vif)
658 {
659 int more_to_do;
660
661 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
662
663 if (more_to_do)
664 napi_schedule(&vif->napi);
665 }
666
667 static void tx_add_credit(struct xenvif *vif)
668 {
669 unsigned long max_burst, max_credit;
670
671 /*
672 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
673 * Otherwise the interface can seize up due to insufficient credit.
674 */
675 max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
676 max_burst = min(max_burst, 131072UL);
677 max_burst = max(max_burst, vif->credit_bytes);
678
679 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
680 max_credit = vif->remaining_credit + vif->credit_bytes;
681 if (max_credit < vif->remaining_credit)
682 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
683
684 vif->remaining_credit = min(max_credit, max_burst);
685 }
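/* Worked example: with credit_bytes == 100000, remaining_credit ==
 * 50000 and a 60000-byte request at the ring head, max_burst becomes
 * max(min(60000, 131072), 100000) == 100000 and max_credit becomes
 * 150000, so remaining_credit is replenished to min(150000, 100000) ==
 * 100000.
 */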
686
687 static void tx_credit_callback(unsigned long data)
688 {
689 struct xenvif *vif = (struct xenvif *)data;
690 tx_add_credit(vif);
691 xenvif_check_rx_xenvif(vif);
692 }
693
694 static void xenvif_tx_err(struct xenvif *vif,
695 struct xen_netif_tx_request *txp, RING_IDX end)
696 {
697 RING_IDX cons = vif->tx.req_cons;
698 unsigned long flags;
699
700 do {
701 spin_lock_irqsave(&vif->response_lock, flags);
702 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
703 spin_unlock_irqrestore(&vif->response_lock, flags);
704 if (cons == end)
705 break;
706 txp = RING_GET_REQUEST(&vif->tx, cons++);
707 } while (1);
708 vif->tx.req_cons = cons;
709 }
710
711 static void xenvif_fatal_tx_err(struct xenvif *vif)
712 {
713 netdev_err(vif->dev, "fatal error; disabling device\n");
714 xenvif_carrier_off(vif);
715 }
716
717 static int xenvif_count_requests(struct xenvif *vif,
718 struct xen_netif_tx_request *first,
719 struct xen_netif_tx_request *txp,
720 int work_to_do)
721 {
722 RING_IDX cons = vif->tx.req_cons;
723 int slots = 0;
724 int drop_err = 0;
725 int more_data;
726
727 if (!(first->flags & XEN_NETTXF_more_data))
728 return 0;
729
730 do {
731 struct xen_netif_tx_request dropped_tx = { 0 };
732
733 if (slots >= work_to_do) {
734 netdev_err(vif->dev,
735 "Asked for %d slots but exceeds this limit\n",
736 work_to_do);
737 xenvif_fatal_tx_err(vif);
738 return -ENODATA;
739 }
740
741 /* This guest is really using too many slots and is
742 * considered malicious.
743 */
744 if (unlikely(slots >= fatal_skb_slots)) {
745 netdev_err(vif->dev,
746 "Malicious frontend using %d slots, threshold %u\n",
747 slots, fatal_skb_slots);
748 xenvif_fatal_tx_err(vif);
749 return -E2BIG;
750 }
751
752 /* The Xen network protocol had an implicit dependency on
753 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
754 * the historical MAX_SKB_FRAGS value 18 to honor the
755 * same behavior as before. Any packet using more than
756 * 18 slots but less than fatal_skb_slots slots is
757 * dropped.
758 */
759 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
760 if (net_ratelimit())
761 netdev_dbg(vif->dev,
762 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
763 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
764 drop_err = -E2BIG;
765 }
766
767 if (drop_err)
768 txp = &dropped_tx;
769
770 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
771 sizeof(*txp));
772
773 /* If the guest submitted a frame >= 64 KiB then
774 * first->size overflowed and following slots will
775 * appear to be larger than the frame.
776 *
777 * This cannot be a fatal error as there are buggy
778 * frontends that do this.
779 *
780 * Consume all slots and drop the packet.
781 */
782 if (!drop_err && txp->size > first->size) {
783 if (net_ratelimit())
784 netdev_dbg(vif->dev,
785 "Invalid tx request, slot size %u > remaining size %u\n",
786 txp->size, first->size);
787 drop_err = -EIO;
788 }
789
790 first->size -= txp->size;
791 slots++;
792
793 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
794 netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
795 txp->offset, txp->size);
796 xenvif_fatal_tx_err(vif);
797 return -EINVAL;
798 }
799
800 more_data = txp->flags & XEN_NETTXF_more_data;
801
802 if (!drop_err)
803 txp++;
804
805 } while (more_data);
806
807 if (drop_err) {
808 xenvif_tx_err(vif, first, cons + slots);
809 return drop_err;
810 }
811
812 return slots;
813 }
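/* On success the return value above is the number of slots beyond the
 * first request (0 for a single-slot packet), which the caller uses as
 * the initial frag count; a negative value means the request stream was
 * consumed and errored.
 */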
814
815
816 struct xenvif_tx_cb {
817 u16 pending_idx;
818 };
819
820 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
821
822 static inline void xenvif_tx_create_gop(struct xenvif *vif,
823 u16 pending_idx,
824 struct xen_netif_tx_request *txp,
825 struct gnttab_map_grant_ref *gop)
826 {
827 vif->pages_to_map[gop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
828 gnttab_set_map_op(gop, idx_to_kaddr(vif, pending_idx),
829 GNTMAP_host_map | GNTMAP_readonly,
830 txp->gref, vif->domid);
831
832 memcpy(&vif->pending_tx_info[pending_idx].req, txp,
833 sizeof(*txp));
834 }
835
836 static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
837 {
838 struct sk_buff *skb =
839 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
840 GFP_ATOMIC | __GFP_NOWARN);
841 if (unlikely(skb == NULL))
842 return NULL;
843
844 /* Packets passed to netif_receive_skb() must have some headroom. */
845 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
846
847 /* Initialize it here to avoid later surprises */
848 skb_shinfo(skb)->destructor_arg = NULL;
849
850 return skb;
851 }
852
853 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
854 struct sk_buff *skb,
855 struct xen_netif_tx_request *txp,
856 struct gnttab_map_grant_ref *gop)
857 {
858 struct skb_shared_info *shinfo = skb_shinfo(skb);
859 skb_frag_t *frags = shinfo->frags;
860 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
861 int start;
862 pending_ring_idx_t index;
863 unsigned int nr_slots, frag_overflow = 0;
864
865 /* At this point shinfo->nr_frags is in fact the number of
866 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
867 */
868 if (shinfo->nr_frags > MAX_SKB_FRAGS) {
869 frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
870 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
871 shinfo->nr_frags = MAX_SKB_FRAGS;
872 }
873 nr_slots = shinfo->nr_frags;
874
875 /* Skip first skb fragment if it is on same page as header fragment. */
876 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
877
878 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
879 shinfo->nr_frags++, txp++, gop++) {
880 index = pending_index(vif->pending_cons++);
881 pending_idx = vif->pending_ring[index];
882 xenvif_tx_create_gop(vif, pending_idx, txp, gop);
883 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
884 }
885
886 if (frag_overflow) {
887 struct sk_buff *nskb = xenvif_alloc_skb(0);
888 if (unlikely(nskb == NULL)) {
889 if (net_ratelimit())
890 netdev_err(vif->dev,
891 "Can't allocate the frag_list skb.\n");
892 return NULL;
893 }
894
895 shinfo = skb_shinfo(nskb);
896 frags = shinfo->frags;
897
898 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
899 shinfo->nr_frags++, txp++, gop++) {
900 index = pending_index(vif->pending_cons++);
901 pending_idx = vif->pending_ring[index];
902 xenvif_tx_create_gop(vif, pending_idx, txp, gop);
903 frag_set_pending_idx(&frags[shinfo->nr_frags],
904 pending_idx);
905 }
906
907 skb_shinfo(skb)->frag_list = nskb;
908 }
909
910 return gop;
911 }
912
913 static inline void xenvif_grant_handle_set(struct xenvif *vif,
914 u16 pending_idx,
915 grant_handle_t handle)
916 {
917 if (unlikely(vif->grant_tx_handle[pending_idx] !=
918 NETBACK_INVALID_HANDLE)) {
919 netdev_err(vif->dev,
920 "Trying to overwrite active handle! pending_idx: %x\n",
921 pending_idx);
922 BUG();
923 }
924 vif->grant_tx_handle[pending_idx] = handle;
925 }
926
927 static inline void xenvif_grant_handle_reset(struct xenvif *vif,
928 u16 pending_idx)
929 {
930 if (unlikely(vif->grant_tx_handle[pending_idx] ==
931 NETBACK_INVALID_HANDLE)) {
932 netdev_err(vif->dev,
933 "Trying to unmap invalid handle! pending_idx: %x\n",
934 pending_idx);
935 BUG();
936 }
937 vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
938 }
939
940 static int xenvif_tx_check_gop(struct xenvif *vif,
941 struct sk_buff *skb,
942 struct gnttab_map_grant_ref **gopp)
943 {
944 struct gnttab_map_grant_ref *gop = *gopp;
945 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
946 struct skb_shared_info *shinfo = skb_shinfo(skb);
947 struct pending_tx_info *tx_info;
948 int nr_frags = shinfo->nr_frags;
949 int i, err, start;
950 struct sk_buff *first_skb = NULL;
951
952 /* Check status of header. */
953 err = gop->status;
954 if (unlikely(err))
955 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
956 else
957 xenvif_grant_handle_set(vif, pending_idx, gop->handle);
958
959 /* Skip first skb fragment if it is on same page as header fragment. */
960 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
961
962 check_frags:
963 for (i = start; i < nr_frags; i++) {
964 int j, newerr;
965
966 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
967 tx_info = &vif->pending_tx_info[pending_idx];
968
969 /* Check error status: if okay then remember grant handle. */
970 newerr = (++gop)->status;
971
972 if (likely(!newerr)) {
973 xenvif_grant_handle_set(vif, pending_idx, gop->handle);
974 /* Had a previous error? Invalidate this fragment. */
975 if (unlikely(err))
976 xenvif_idx_unmap(vif, pending_idx);
977 continue;
978 }
979
980 /* Error on this fragment: respond to client with an error. */
981 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
982
983 /* Not the first error? Preceding frags already invalidated. */
984 if (err)
985 continue;
986 /* First error: invalidate header and preceding fragments. */
987 if (!first_skb)
988 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
989 else
990 pending_idx = XENVIF_TX_CB(first_skb)->pending_idx;
991 xenvif_idx_unmap(vif, pending_idx);
992 for (j = start; j < i; j++) {
993 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
994 xenvif_idx_unmap(vif, pending_idx);
995 }
996
997 /* Remember the error: invalidate all subsequent fragments. */
998 err = newerr;
999 }
1000
1001 if (skb_has_frag_list(skb)) {
1002 first_skb = skb;
1003 skb = shinfo->frag_list;
1004 shinfo = skb_shinfo(skb);
1005 nr_frags = shinfo->nr_frags;
1006 start = 0;
1007
1008 goto check_frags;
1009 }
1010
1011 /* If there was a mapping error in the frag_list skb, we have to unmap
1012 * the first skb's frags as well.
1013 */
1014 if (first_skb && err) {
1015 int j;
1016 shinfo = skb_shinfo(first_skb);
1017 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1018 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1019 for (j = start; j < shinfo->nr_frags; j++) {
1020 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1021 xenvif_idx_unmap(vif, pending_idx);
1022 }
1023 }
1024
1025 *gopp = gop + 1;
1026 return err;
1027 }
1028
1029 static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
1030 {
1031 struct skb_shared_info *shinfo = skb_shinfo(skb);
1032 int nr_frags = shinfo->nr_frags;
1033 int i;
1034 u16 prev_pending_idx = INVALID_PENDING_IDX;
1035
1036 if (skb_shinfo(skb)->destructor_arg)
1037 prev_pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1038
1039 for (i = 0; i < nr_frags; i++) {
1040 skb_frag_t *frag = shinfo->frags + i;
1041 struct xen_netif_tx_request *txp;
1042 struct page *page;
1043 u16 pending_idx;
1044
1045 pending_idx = frag_get_pending_idx(frag);
1046
1047 /* If this is not the first frag, chain it to the previous. */
1048 if (unlikely(prev_pending_idx == INVALID_PENDING_IDX))
1049 skb_shinfo(skb)->destructor_arg =
1050 &callback_param(vif, pending_idx);
1051 else if (likely(pending_idx != prev_pending_idx))
1052 callback_param(vif, prev_pending_idx).ctx =
1053 &callback_param(vif, pending_idx);
1054
1055 callback_param(vif, pending_idx).ctx = NULL;
1056 prev_pending_idx = pending_idx;
1057
1058 txp = &vif->pending_tx_info[pending_idx].req;
1059 page = virt_to_page(idx_to_kaddr(vif, pending_idx));
1060 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1061 skb->len += txp->size;
1062 skb->data_len += txp->size;
1063 skb->truesize += txp->size;
1064
1065 /* Take an extra reference to offset network stack's put_page */
1066 get_page(vif->mmap_pages[pending_idx]);
1067 }
1068 /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
1069 * overlaps with "index", and "mapping" is not set. I think mapping
1070 * should be set. If delivered to local stack, it would drop this
1071 * skb in sk_filter unless the socket has the right to use it.
1072 */
1073 skb->pfmemalloc = false;
1074 }
1075
1076 static int xenvif_get_extras(struct xenvif *vif,
1077 struct xen_netif_extra_info *extras,
1078 int work_to_do)
1079 {
1080 struct xen_netif_extra_info extra;
1081 RING_IDX cons = vif->tx.req_cons;
1082
1083 do {
1084 if (unlikely(work_to_do-- <= 0)) {
1085 netdev_err(vif->dev, "Missing extra info\n");
1086 xenvif_fatal_tx_err(vif);
1087 return -EBADR;
1088 }
1089
1090 memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
1091 sizeof(extra));
1092 if (unlikely(!extra.type ||
1093 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1094 vif->tx.req_cons = ++cons;
1095 netdev_err(vif->dev,
1096 "Invalid extra type: %d\n", extra.type);
1097 xenvif_fatal_tx_err(vif);
1098 return -EINVAL;
1099 }
1100
1101 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1102 vif->tx.req_cons = ++cons;
1103 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1104
1105 return work_to_do;
1106 }
1107
1108 static int xenvif_set_skb_gso(struct xenvif *vif,
1109 struct sk_buff *skb,
1110 struct xen_netif_extra_info *gso)
1111 {
1112 if (!gso->u.gso.size) {
1113 netdev_err(vif->dev, "GSO size must not be zero.\n");
1114 xenvif_fatal_tx_err(vif);
1115 return -EINVAL;
1116 }
1117
1118 switch (gso->u.gso.type) {
1119 case XEN_NETIF_GSO_TYPE_TCPV4:
1120 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1121 break;
1122 case XEN_NETIF_GSO_TYPE_TCPV6:
1123 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1124 break;
1125 default:
1126 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1127 xenvif_fatal_tx_err(vif);
1128 return -EINVAL;
1129 }
1130
1131 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1132 /* gso_segs will be calculated later */
1133
1134 return 0;
1135 }
1136
1137 static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1138 {
1139 bool recalculate_partial_csum = false;
1140
1141 /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1142 * peers can fail to set NETRXF_csum_blank when sending a GSO
1143 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1144 * recalculate the partial checksum.
1145 */
1146 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1147 vif->rx_gso_checksum_fixup++;
1148 skb->ip_summed = CHECKSUM_PARTIAL;
1149 recalculate_partial_csum = true;
1150 }
1151
1152 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1153 if (skb->ip_summed != CHECKSUM_PARTIAL)
1154 return 0;
1155
1156 return skb_checksum_setup(skb, recalculate_partial_csum);
1157 }
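/* skb_checksum_setup() locates the transport header and fills in the
 * checksum offsets; passing recalculate_partial_csum additionally
 * recomputes the pseudo-header checksum, which is needed when we forced
 * a GSO skb to CHECKSUM_PARTIAL above.
 */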
1158
1159 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1160 {
1161 u64 now = get_jiffies_64();
1162 u64 next_credit = vif->credit_window_start +
1163 msecs_to_jiffies(vif->credit_usec / 1000);
1164
1165 /* Timer could already be pending in rare cases. */
1166 if (timer_pending(&vif->credit_timeout))
1167 return true;
1168
1169 /* Passed the point where we can replenish credit? */
1170 if (time_after_eq64(now, next_credit)) {
1171 vif->credit_window_start = now;
1172 tx_add_credit(vif);
1173 }
1174
1175 /* Still too big to send right now? Set a callback. */
1176 if (size > vif->remaining_credit) {
1177 vif->credit_timeout.data =
1178 (unsigned long)vif;
1179 vif->credit_timeout.function =
1180 tx_credit_callback;
1181 mod_timer(&vif->credit_timeout,
1182 next_credit);
1183 vif->credit_window_start = next_credit;
1184
1185 return true;
1186 }
1187
1188 return false;
1189 }
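/* Credit timing: next_credit marks the end of the current window,
 * credit_usec after credit_window_start. Once past it, a fresh window
 * opens and credit is replenished immediately; if the packet still
 * exceeds remaining_credit, the timer defers a retry to the start of
 * the next window instead of busy-polling.
 */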
1190
1191 static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
1192 {
1193 struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
1194 struct sk_buff *skb;
1195 int ret;
1196
1197 while (skb_queue_len(&vif->tx_queue) < budget) {
1198 struct xen_netif_tx_request txreq;
1199 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1200 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1201 u16 pending_idx;
1202 RING_IDX idx;
1203 int work_to_do;
1204 unsigned int data_len;
1205 pending_ring_idx_t index;
1206
1207 if (vif->tx.sring->req_prod - vif->tx.req_cons >
1208 XEN_NETIF_TX_RING_SIZE) {
1209 netdev_err(vif->dev,
1210 "Impossible number of requests. "
1211 "req_prod %d, req_cons %d, size %ld\n",
1212 vif->tx.sring->req_prod, vif->tx.req_cons,
1213 XEN_NETIF_TX_RING_SIZE);
1214 xenvif_fatal_tx_err(vif);
1215 continue;
1216 }
1217
1218 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
1219 if (!work_to_do)
1220 break;
1221
1222 idx = vif->tx.req_cons;
1223 rmb(); /* Ensure that we see the request before we copy it. */
1224 memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
1225
1226 /* Credit-based scheduling. */
1227 if (txreq.size > vif->remaining_credit &&
1228 tx_credit_exceeded(vif, txreq.size))
1229 break;
1230
1231 vif->remaining_credit -= txreq.size;
1232
1233 work_to_do--;
1234 vif->tx.req_cons = ++idx;
1235
1236 memset(extras, 0, sizeof(extras));
1237 if (txreq.flags & XEN_NETTXF_extra_info) {
1238 work_to_do = xenvif_get_extras(vif, extras,
1239 work_to_do);
1240 idx = vif->tx.req_cons;
1241 if (unlikely(work_to_do < 0))
1242 break;
1243 }
1244
1245 ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
1246 if (unlikely(ret < 0))
1247 break;
1248
1249 idx += ret;
1250
1251 if (unlikely(txreq.size < ETH_HLEN)) {
1252 netdev_dbg(vif->dev,
1253 "Bad packet size: %d\n", txreq.size);
1254 xenvif_tx_err(vif, &txreq, idx);
1255 break;
1256 }
1257
1258 /* The payload must not cross a page boundary, as it must not fragment. */
1259 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1260 netdev_err(vif->dev,
1261 "txreq.offset: %x, size: %u, end: %lu\n",
1262 txreq.offset, txreq.size,
1263 (txreq.offset&~PAGE_MASK) + txreq.size);
1264 xenvif_fatal_tx_err(vif);
1265 break;
1266 }
1267
1268 index = pending_index(vif->pending_cons);
1269 pending_idx = vif->pending_ring[index];
1270
1271 data_len = (txreq.size > PKT_PROT_LEN &&
1272 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1273 PKT_PROT_LEN : txreq.size;
1274
1275 skb = xenvif_alloc_skb(data_len);
1276 if (unlikely(skb == NULL)) {
1277 netdev_dbg(vif->dev,
1278 "Can't allocate a skb in start_xmit.\n");
1279 xenvif_tx_err(vif, &txreq, idx);
1280 break;
1281 }
1282
1283 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1284 struct xen_netif_extra_info *gso;
1285 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1286
1287 if (xenvif_set_skb_gso(vif, skb, gso)) {
1288 /* Failure in xenvif_set_skb_gso is fatal. */
1289 kfree_skb(skb);
1290 break;
1291 }
1292 }
1293
1294 xenvif_tx_create_gop(vif, pending_idx, &txreq, gop);
1295
1296 gop++;
1297
1298 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1299
1300 __skb_put(skb, data_len);
1301
1302 skb_shinfo(skb)->nr_frags = ret;
1303 if (data_len < txreq.size) {
1304 skb_shinfo(skb)->nr_frags++;
1305 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1306 pending_idx);
1307 } else {
1308 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1309 INVALID_PENDING_IDX);
1310 }
1311
1312 vif->pending_cons++;
1313
1314 request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
1315 if (request_gop == NULL) {
1316 kfree_skb(skb);
1317 xenvif_tx_err(vif, &txreq, idx);
1318 break;
1319 }
1320 gop = request_gop;
1321
1322 __skb_queue_tail(&vif->tx_queue, skb);
1323
1324 vif->tx.req_cons = idx;
1325
1326 if ((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops))
1327 break;
1328 }
1329
1330 return gop - vif->tx_map_ops;
1331 }
1332
1333 /* Consolidate an skb with a frag_list into a brand new one with local pages
1334 * on frags. Returns 0, or -ENOMEM if it can't allocate new pages.
1335 */
1336 static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
1337 {
1338 unsigned int offset = skb_headlen(skb);
1339 skb_frag_t frags[MAX_SKB_FRAGS];
1340 int i;
1341 struct ubuf_info *uarg;
1342 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1343
1344 vif->tx_zerocopy_sent += 2;
1345 vif->tx_frag_overflow++;
1346
1347 xenvif_fill_frags(vif, nskb);
1348 /* Subtract frags size; we will correct it later */
1349 skb->truesize -= skb->data_len;
1350 skb->len += nskb->len;
1351 skb->data_len += nskb->len;
1352
1353 /* create a brand new frags array and coalesce there */
1354 for (i = 0; offset < skb->len; i++) {
1355 struct page *page;
1356 unsigned int len;
1357
1358 BUG_ON(i >= MAX_SKB_FRAGS);
1359 page = alloc_page(GFP_ATOMIC|__GFP_COLD);
1360 if (!page) {
1361 int j;
1362 skb->truesize += skb->data_len;
1363 for (j = 0; j < i; j++)
1364 put_page(frags[j].page.p);
1365 return -ENOMEM;
1366 }
1367
1368 if (offset + PAGE_SIZE < skb->len)
1369 len = PAGE_SIZE;
1370 else
1371 len = skb->len - offset;
1372 if (skb_copy_bits(skb, offset, page_address(page), len))
1373 BUG();
1374
1375 offset += len;
1376 frags[i].page.p = page;
1377 frags[i].page_offset = 0;
1378 skb_frag_size_set(&frags[i], len);
1379 }
1380 /* swap out with old one */
1381 memcpy(skb_shinfo(skb)->frags,
1382 frags,
1383 i * sizeof(skb_frag_t));
1384 skb_shinfo(skb)->nr_frags = i;
1385 skb->truesize += i * PAGE_SIZE;
1386
1387 /* remove traces of mapped pages and frag_list */
1388 skb_frag_list_init(skb);
1389 uarg = skb_shinfo(skb)->destructor_arg;
1390 uarg->callback(uarg, true);
1391 skb_shinfo(skb)->destructor_arg = NULL;
1392
1393 skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1394 kfree_skb(nskb);
1395
1396 return 0;
1397 }
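/* On success the skb is backed entirely by freshly allocated local
 * pages: the zerocopy callback has been fired, queueing every pending
 * slot in the ubuf chain for unmapping, destructor_arg is cleared, and
 * the frag_list skb has been freed, so the packet no longer pins any
 * guest grant pages.
 */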
1398
1399 static int xenvif_tx_submit(struct xenvif *vif)
1400 {
1401 struct gnttab_map_grant_ref *gop = vif->tx_map_ops;
1402 struct sk_buff *skb;
1403 int work_done = 0;
1404
1405 while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
1406 struct xen_netif_tx_request *txp;
1407 u16 pending_idx;
1408 unsigned data_len;
1409
1410 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1411 txp = &vif->pending_tx_info[pending_idx].req;
1412
1413 /* Check the remap error code. */
1414 if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
1415 netdev_dbg(vif->dev, "netback grant failed.\n");
1416 skb_shinfo(skb)->nr_frags = 0;
1417 kfree_skb(skb);
1418 continue;
1419 }
1420
1421 data_len = skb->len;
1422 memcpy(skb->data,
1423 (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
1424 data_len);
1425 callback_param(vif, pending_idx).ctx = NULL;
1426 if (data_len < txp->size) {
1427 /* Append the packet payload as a fragment. */
1428 txp->offset += data_len;
1429 txp->size -= data_len;
1430 skb_shinfo(skb)->destructor_arg =
1431 &callback_param(vif, pending_idx);
1432 } else {
1433 /* Schedule a response immediately. */
1434 xenvif_idx_unmap(vif, pending_idx);
1435 }
1436
1437 if (txp->flags & XEN_NETTXF_csum_blank)
1438 skb->ip_summed = CHECKSUM_PARTIAL;
1439 else if (txp->flags & XEN_NETTXF_data_validated)
1440 skb->ip_summed = CHECKSUM_UNNECESSARY;
1441
1442 xenvif_fill_frags(vif, skb);
1443
1444 if (unlikely(skb_has_frag_list(skb))) {
1445 if (xenvif_handle_frag_list(vif, skb)) {
1446 if (net_ratelimit())
1447 netdev_err(vif->dev,
1448 "Not enough memory to consolidate frag_list!\n");
1449 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1450 kfree_skb(skb);
1451 continue;
1452 }
1453 }
1454
1455 if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
1456 int target = min_t(int, skb->len, PKT_PROT_LEN);
1457 __pskb_pull_tail(skb, target - skb_headlen(skb));
1458 }
1459
1460 skb->dev = vif->dev;
1461 skb->protocol = eth_type_trans(skb, skb->dev);
1462 skb_reset_network_header(skb);
1463
1464 if (checksum_setup(vif, skb)) {
1465 netdev_dbg(vif->dev,
1466 "Can't setup checksum in net_tx_action\n");
1467 /* We have to set this flag to trigger the callback */
1468 if (skb_shinfo(skb)->destructor_arg)
1469 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1470 kfree_skb(skb);
1471 continue;
1472 }
1473
1474 skb_probe_transport_header(skb, 0);
1475
1476 /* If the packet is GSO then we will have just set up the
1477 * transport header offset in checksum_setup so it's now
1478 * straightforward to calculate gso_segs.
1479 */
1480 if (skb_is_gso(skb)) {
1481 int mss = skb_shinfo(skb)->gso_size;
1482 int hdrlen = skb_transport_header(skb) -
1483 skb_mac_header(skb) +
1484 tcp_hdrlen(skb);
1485
1486 skb_shinfo(skb)->gso_segs =
1487 DIV_ROUND_UP(skb->len - hdrlen, mss);
1488 }
1489
1490 vif->dev->stats.rx_bytes += skb->len;
1491 vif->dev->stats.rx_packets++;
1492
1493 work_done++;
1494
1495 /* Set this flag right before netif_receive_skb, otherwise
1496 * someone might think this packet already left netback, and
1497 * do a skb_copy_ubufs while we are still in control of the
1498 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
1499 */
1500 if (skb_shinfo(skb)->destructor_arg) {
1501 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1502 vif->tx_zerocopy_sent++;
1503 }
1504
1505 netif_receive_skb(skb);
1506 }
1507
1508 return work_done;
1509 }
1510
1511 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1512 {
1513 unsigned long flags;
1514 pending_ring_idx_t index;
1515 struct xenvif *vif = ubuf_to_vif(ubuf);
1516
1517 /* This is the only place where we grab this lock, to protect callbacks
1518 * from each other.
1519 */
1520 spin_lock_irqsave(&vif->callback_lock, flags);
1521 do {
1522 u16 pending_idx = ubuf->desc;
1523 ubuf = (struct ubuf_info *) ubuf->ctx;
1524 BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
1525 MAX_PENDING_REQS);
1526 index = pending_index(vif->dealloc_prod);
1527 vif->dealloc_ring[index] = pending_idx;
1528 /* Sync with xenvif_tx_dealloc_action:
1529 * insert idx then incr producer.
1530 */
1531 smp_wmb();
1532 vif->dealloc_prod++;
1533 } while (ubuf);
1534 wake_up(&vif->dealloc_wq);
1535 spin_unlock_irqrestore(&vif->callback_lock, flags);
1536
1537 if (likely(zerocopy_success))
1538 vif->tx_zerocopy_success++;
1539 else
1540 vif->tx_zerocopy_fail++;
1541 }
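/* Ordering note: callback_lock serialises concurrent callbacks (the
 * producers of the dealloc ring), while the dealloc thread is the sole
 * consumer; the smp_wmb() above pairs with the smp_rmb() in
 * xenvif_tx_dealloc_action() so the consumer never sees the producer
 * index ahead of the ring contents.
 */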
1542
1543 static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
1544 {
1545 struct gnttab_unmap_grant_ref *gop;
1546 pending_ring_idx_t dc, dp;
1547 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1548 unsigned int i = 0;
1549
1550 dc = vif->dealloc_cons;
1551 gop = vif->tx_unmap_ops;
1552
1553 /* Free up any grants we have finished using */
1554 do {
1555 dp = vif->dealloc_prod;
1556
1557 /* Ensure we see all indices enqueued by all
1558 * xenvif_zerocopy_callback().
1559 */
1560 smp_rmb();
1561
1562 while (dc != dp) {
1563 BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
1564 pending_idx =
1565 vif->dealloc_ring[pending_index(dc++)];
1566
1567 pending_idx_release[gop-vif->tx_unmap_ops] =
1568 pending_idx;
1569 vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
1570 vif->mmap_pages[pending_idx];
1571 gnttab_set_unmap_op(gop,
1572 idx_to_kaddr(vif, pending_idx),
1573 GNTMAP_host_map,
1574 vif->grant_tx_handle[pending_idx]);
1575 xenvif_grant_handle_reset(vif, pending_idx);
1576 ++gop;
1577 }
1578
1579 } while (dp != vif->dealloc_prod);
1580
1581 vif->dealloc_cons = dc;
1582
1583 if (gop - vif->tx_unmap_ops > 0) {
1584 int ret;
1585 ret = gnttab_unmap_refs(vif->tx_unmap_ops,
1586 NULL,
1587 vif->pages_to_unmap,
1588 gop - vif->tx_unmap_ops);
1589 if (ret) {
1590 netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
1591 gop - vif->tx_unmap_ops, ret);
1592 for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
1593 if (gop[i].status != GNTST_okay)
1594 netdev_err(vif->dev,
1595 " host_addr: %llx handle: %x status: %d\n",
1596 gop[i].host_addr,
1597 gop[i].handle,
1598 gop[i].status);
1599 }
1600 BUG();
1601 }
1602 }
1603
1604 for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
1605 xenvif_idx_release(vif, pending_idx_release[i],
1606 XEN_NETIF_RSP_OKAY);
1607 }
1608
1609
1610 /* Called after netfront has transmitted */
1611 int xenvif_tx_action(struct xenvif *vif, int budget)
1612 {
1613 unsigned nr_gops;
1614 int work_done, ret;
1615
1616 if (unlikely(!tx_work_todo(vif)))
1617 return 0;
1618
1619 nr_gops = xenvif_tx_build_gops(vif, budget);
1620
1621 if (nr_gops == 0)
1622 return 0;
1623
1624 ret = gnttab_map_refs(vif->tx_map_ops,
1625 NULL,
1626 vif->pages_to_map,
1627 nr_gops);
1628 BUG_ON(ret);
1629
1630 work_done = xenvif_tx_submit(vif);
1631
1632 return work_done;
1633 }
1634
1635 static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
1636 u8 status)
1637 {
1638 struct pending_tx_info *pending_tx_info;
1639 pending_ring_idx_t index;
1640 unsigned long flags;
1641
1642 pending_tx_info = &vif->pending_tx_info[pending_idx];
1643 spin_lock_irqsave(&vif->response_lock, flags);
1644 make_tx_response(vif, &pending_tx_info->req, status);
1645 index = pending_index(vif->pending_prod);
1646 vif->pending_ring[index] = pending_idx;
1647 /* TX shouldn't use the index before we give it back here */
1648 mb();
1649 vif->pending_prod++;
1650 spin_unlock_irqrestore(&vif->response_lock, flags);
1651 }
1652
1653
1654 static void make_tx_response(struct xenvif *vif,
1655 struct xen_netif_tx_request *txp,
1656 s8 st)
1657 {
1658 RING_IDX i = vif->tx.rsp_prod_pvt;
1659 struct xen_netif_tx_response *resp;
1660 int notify;
1661
1662 resp = RING_GET_RESPONSE(&vif->tx, i);
1663 resp->id = txp->id;
1664 resp->status = st;
1665
1666 if (txp->flags & XEN_NETTXF_extra_info)
1667 RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1668
1669 vif->tx.rsp_prod_pvt = ++i;
1670 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1671 if (notify)
1672 notify_remote_via_irq(vif->tx_irq);
1673 }
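/* Note the XEN_NETIF_RSP_NULL slot above: a tx request that carried
 * extra info consumed two ring slots, so two responses are produced to
 * keep the request and response rings in step.
 */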
1674
1675 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1676 u16 id,
1677 s8 st,
1678 u16 offset,
1679 u16 size,
1680 u16 flags)
1681 {
1682 RING_IDX i = vif->rx.rsp_prod_pvt;
1683 struct xen_netif_rx_response *resp;
1684
1685 resp = RING_GET_RESPONSE(&vif->rx, i);
1686 resp->offset = offset;
1687 resp->flags = flags;
1688 resp->id = id;
1689 resp->status = (s16)size;
1690 if (st < 0)
1691 resp->status = (s16)st;
1692
1693 vif->rx.rsp_prod_pvt = ++i;
1694
1695 return resp;
1696 }
1697
1698 void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
1699 {
1700 int ret;
1701 struct gnttab_unmap_grant_ref tx_unmap_op;
1702
1703 gnttab_set_unmap_op(&tx_unmap_op,
1704 idx_to_kaddr(vif, pending_idx),
1705 GNTMAP_host_map,
1706 vif->grant_tx_handle[pending_idx]);
1707 xenvif_grant_handle_reset(vif, pending_idx);
1708
1709 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1710 &vif->mmap_pages[pending_idx], 1);
1711 if (ret) {
1712 netdev_err(vif->dev,
1713 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
1714 ret,
1715 pending_idx,
1716 tx_unmap_op.host_addr,
1717 tx_unmap_op.handle,
1718 tx_unmap_op.status);
1719 BUG();
1720 }
1721
1722 xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
1723 }
1724
1725 static inline int rx_work_todo(struct xenvif *vif)
1726 {
1727 return (!skb_queue_empty(&vif->rx_queue) &&
1728 xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
1729 vif->rx_queue_purge;
1730 }
1731
1732 static inline int tx_work_todo(struct xenvif *vif)
1733 {
1734
1735 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
1736 return 1;
1737
1738 return 0;
1739 }
1740
1741 static inline bool tx_dealloc_work_todo(struct xenvif *vif)
1742 {
1743 return vif->dealloc_cons != vif->dealloc_prod;
1744 }
1745
1746 void xenvif_unmap_frontend_rings(struct xenvif *vif)
1747 {
1748 if (vif->tx.sring)
1749 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1750 vif->tx.sring);
1751 if (vif->rx.sring)
1752 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1753 vif->rx.sring);
1754 }
1755
1756 int xenvif_map_frontend_rings(struct xenvif *vif,
1757 grant_ref_t tx_ring_ref,
1758 grant_ref_t rx_ring_ref)
1759 {
1760 void *addr;
1761 struct xen_netif_tx_sring *txs;
1762 struct xen_netif_rx_sring *rxs;
1763
1764 int err = -ENOMEM;
1765
1766 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1767 tx_ring_ref, &addr);
1768 if (err)
1769 goto err;
1770
1771 txs = (struct xen_netif_tx_sring *)addr;
1772 BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
1773
1774 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1775 rx_ring_ref, &addr);
1776 if (err)
1777 goto err;
1778
1779 rxs = (struct xen_netif_rx_sring *)addr;
1780 BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
1781
1782 return 0;
1783
1784 err:
1785 xenvif_unmap_frontend_rings(vif);
1786 return err;
1787 }
1788
1789 void xenvif_stop_queue(struct xenvif *vif)
1790 {
1791 if (!vif->can_queue)
1792 return;
1793
1794 netif_stop_queue(vif->dev);
1795 }
1796
1797 static void xenvif_start_queue(struct xenvif *vif)
1798 {
1799 if (xenvif_schedulable(vif))
1800 netif_wake_queue(vif->dev);
1801 }
1802
1803 int xenvif_kthread_guest_rx(void *data)
1804 {
1805 struct xenvif *vif = data;
1806 struct sk_buff *skb;
1807
1808 while (!kthread_should_stop()) {
1809 wait_event_interruptible(vif->wq,
1810 rx_work_todo(vif) ||
1811 kthread_should_stop());
1812 if (kthread_should_stop())
1813 break;
1814
1815 if (vif->rx_queue_purge) {
1816 skb_queue_purge(&vif->rx_queue);
1817 vif->rx_queue_purge = false;
1818 }
1819
1820 if (!skb_queue_empty(&vif->rx_queue))
1821 xenvif_rx_action(vif);
1822
1823 if (skb_queue_empty(&vif->rx_queue) &&
1824 netif_queue_stopped(vif->dev)) {
1825 del_timer_sync(&vif->wake_queue);
1826 xenvif_start_queue(vif);
1827 }
1828
1829 cond_resched();
1830 }
1831
1832 /* Bin any remaining skbs */
1833 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
1834 dev_kfree_skb(skb);
1835
1836 return 0;
1837 }
1838
1839 int xenvif_dealloc_kthread(void *data)
1840 {
1841 struct xenvif *vif = data;
1842
1843 while (!kthread_should_stop()) {
1844 wait_event_interruptible(vif->dealloc_wq,
1845 tx_dealloc_work_todo(vif) ||
1846 kthread_should_stop());
1847 if (kthread_should_stop())
1848 break;
1849
1850 xenvif_tx_dealloc_action(vif);
1851 cond_resched();
1852 }
1853
1854 /* Unmap anything remaining */
1855 if (tx_dealloc_work_todo(vif))
1856 xenvif_tx_dealloc_action(vif);
1857
1858 return 0;
1859 }
1860
1861 static int __init netback_init(void)
1862 {
1863 int rc = 0;
1864
1865 if (!xen_domain())
1866 return -ENODEV;
1867
1868 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1869 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1870 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1871 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1872 }
1873
1874 rc = xenvif_xenbus_init();
1875 if (rc)
1876 goto failed_init;
1877
1878 rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
1879
1880 return 0;
1881
1882 failed_init:
1883 return rc;
1884 }
1885
1886 module_init(netback_init);
1887
1888 static void __exit netback_fini(void)
1889 {
1890 xenvif_xenbus_fini();
1891 }
1892 module_exit(netback_fini);
1893
1894 MODULE_LICENSE("Dual BSD/GPL");
1895 MODULE_ALIAS("xen-backend:vif");