Merge branch 'x86/cpu' into x86/core
[deliverable/linux.git] / arch / x86 / xen / multicalls.c
CommitLineData
5ead97c8
JF
1/*
2 * Xen hypercall batching.
3 *
4 * Xen allows multiple hypercalls to be issued at once, using the
5 * multicall interface. This allows the cost of trapping into the
6 * hypervisor to be amortized over several calls.
7 *
8 * This file implements a simple interface for multicalls. There's a
9 * per-cpu buffer of outstanding multicalls. When you want to queue a
10 * multicall for issuing, you can allocate a multicall slot for the
11 * call and its arguments, along with storage for space which is
12 * pointed to by the arguments (for passing pointers to structures,
13 * etc). When the multicall is actually issued, all the space for the
14 * commands and allocated memory is freed for reuse.
15 *
16 * Multicalls are flushed whenever any of the buffers get full, or
17 * when explicitly requested. There's no way to get per-multicall
18 * return results back. It will BUG if any of the multicalls fail.
19 *
20 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
21 */
22#include <linux/percpu.h>
f120f13e 23#include <linux/hardirq.h>
5ead97c8
JF
24
25#include <asm/xen/hypercall.h>
26
27#include "multicalls.h"
28
a122d623
JF
/* When non-zero, keep a shadow copy of each submitted batch so failed
   calls can be reported with their original op/args (the hypervisor
   overwrites the entries with results). */
#define MC_DEBUG 1

/* Max hypercalls queued per CPU before a flush is forced. */
#define MC_BATCH	32
/* Bytes of scratch space for call arguments; 16 bytes/call on average. */
#define MC_ARGS		(MC_BATCH * 16)

/* Per-cpu batch of pending multicalls plus their out-of-line argument
   storage and post-flush callbacks. */
struct mc_buffer {
	struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
	/* Pristine copy of entries[], taken just before issuing the batch. */
	struct multicall_entry debug[MC_BATCH];
#endif
	/* Bump-allocated scratch area handed out by __xen_mc_entry(). */
	unsigned char args[MC_ARGS];
	/* Functions to run (with their data) after the batch completes. */
	struct callback {
		void (*fn)(void *);
		void *data;
	} callbacks[MC_BATCH];
	/* Fill levels of entries[], args[] and callbacks[] respectively. */
	unsigned mcidx, argidx, cbidx;
};

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
/* Saved irq flags for the current batch; non-static, used by callers
   of the batching API (see multicalls.h). */
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
49
/*
 * Issue every queued multicall on this CPU in one hypercall, reset the
 * buffer, then run any registered callbacks.  BUGs if the batch itself
 * cannot be issued or if any individual call in it failed (there is no
 * per-call error reporting in this interface — see file header).
 * Must be called with preemption disabled.
 */
void xen_mc_flush(void)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	int ret = 0;		/* number of calls whose result was < 0 */
	unsigned long flags;
	int i;

	BUG_ON(preemptible());

	/* Disable interrupts in case someone comes in and queues
	   something in the middle */
	local_irq_save(flags);

	if (b->mcidx) {
#if MC_DEBUG
		/* Snapshot the batch: the hypervisor writes results into
		   entries[], so keep the original op/args for reporting. */
		memcpy(b->debug, b->entries,
		       b->mcidx * sizeof(struct multicall_entry));
#endif

		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		/* Count individual failures; negative result == error. */
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;

#if MC_DEBUG
		if (ret) {
			printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
			       ret, smp_processor_id());
			dump_stack();
			for (i = 0; i < b->mcidx; i++) {
				printk(" call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
				       i+1, b->mcidx,
				       b->debug[i].op,
				       b->debug[i].args[0],
				       b->entries[i].result);
			}
		}
#endif

		b->mcidx = 0;
		b->argidx = 0;
	} else
		/* No pending calls implies no argument space in use. */
		BUG_ON(b->argidx != 0);

	local_irq_restore(flags);

	/* Run callbacks only after irqs are restored: they may queue or
	   flush further work of their own. */
	for (i = 0; i < b->cbidx; i++) {
		struct callback *cb = &b->callbacks[i];

		(*cb->fn)(cb->data);
	}
	b->cbidx = 0;

	/* Deferred so the debug dump above runs before we die. */
	BUG_ON(ret);
}
106
107struct multicall_space __xen_mc_entry(size_t args)
108{
f120f13e 109 struct mc_buffer *b = &__get_cpu_var(mc_buffer);
5ead97c8 110 struct multicall_space ret;
400d3494 111 unsigned argidx = roundup(b->argidx, sizeof(u64));
5ead97c8 112
f120f13e 113 BUG_ON(preemptible());
400d3494 114 BUG_ON(b->argidx > MC_ARGS);
5ead97c8
JF
115
116 if (b->mcidx == MC_BATCH ||
400d3494 117 (argidx + args) > MC_ARGS) {
5ead97c8 118 xen_mc_flush();
400d3494
JF
119 argidx = roundup(b->argidx, sizeof(u64));
120 }
5ead97c8
JF
121
122 ret.mc = &b->entries[b->mcidx];
123 b->mcidx++;
400d3494
JF
124 ret.args = &b->args[argidx];
125 b->argidx = argidx + args;
126
127 BUG_ON(b->argidx > MC_ARGS);
128 return ret;
129}
130
131struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
132{
133 struct mc_buffer *b = &__get_cpu_var(mc_buffer);
134 struct multicall_space ret = { NULL, NULL };
135
136 BUG_ON(preemptible());
137 BUG_ON(b->argidx > MC_ARGS);
138
139 if (b->mcidx == 0)
140 return ret;
141
142 if (b->entries[b->mcidx - 1].op != op)
143 return ret;
144
145 if ((b->argidx + size) > MC_ARGS)
146 return ret;
147
148 ret.mc = &b->entries[b->mcidx - 1];
5ead97c8 149 ret.args = &b->args[b->argidx];
400d3494 150 b->argidx += size;
5ead97c8 151
400d3494 152 BUG_ON(b->argidx > MC_ARGS);
5ead97c8
JF
153 return ret;
154}
91e0c5f3
JF
155
156void xen_mc_callback(void (*fn)(void *), void *data)
157{
158 struct mc_buffer *b = &__get_cpu_var(mc_buffer);
159 struct callback *cb;
160
161 if (b->cbidx == MC_BATCH)
162 xen_mc_flush();
163
164 cb = &b->callbacks[b->cbidx++];
165 cb->fn = fn;
166 cb->data = data;
167}
This page took 0.201478 seconds and 5 git commands to generate.