/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a multicall slot for the
 * call and its arguments, along with storage for any data the
 * arguments point to (for passing pointers to structures, etc).  When
 * the multicall is actually issued, all the space for the commands
 * and allocated memory is freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers get full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back.  It will BUG if any of the multicalls fail.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
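/*
 * Typical usage (an illustrative sketch only, not code from this file:
 * the xen_mc_entry()/xen_mc_issue() wrappers are declared in
 * multicalls.h, and the mmuext op is just one example of a hypercall
 * worth batching):
 *
 *	struct multicall_space mcs;
 *	struct mmuext_op *op;
 *
 *	preempt_disable();
 *	mcs = xen_mc_entry(sizeof(*op));	(reserve entry + arg space)
 *	op = mcs.args;
 *	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
 *	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);	(flushes unless in lazy MMU mode)
 *	preempt_enable();
 */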
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/debugfs.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"
#include "debugfs.h"

#define MC_BATCH	32

#define MC_DEBUG	1

#define MC_ARGS		(MC_BATCH * 16)


struct mc_buffer {
        struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
        struct multicall_entry debug[MC_BATCH];
#endif
        unsigned char args[MC_ARGS];
        struct callback {
                void (*fn)(void *);
                void *data;
        } callbacks[MC_BATCH];
        unsigned mcidx, argidx, cbidx;
};

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

/* flush reasons 0- slots, 1- args, 2- callbacks */
enum flush_reasons
{
        FL_SLOTS,
        FL_ARGS,
        FL_CALLBACKS,

        FL_N_REASONS
};

#ifdef CONFIG_XEN_DEBUG_FS
#define NHYPERCALLS	40		/* not really */

static struct {
        unsigned histo[MC_BATCH+1];

        unsigned issued;
        unsigned arg_total;
        unsigned hypercalls;
        unsigned histo_hypercalls[NHYPERCALLS];

        unsigned flush[FL_N_REASONS];
} mc_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
        if (unlikely(zero_stats)) {
                memset(&mc_stats, 0, sizeof(mc_stats));
                zero_stats = 0;
        }
}

static void mc_add_stats(const struct mc_buffer *mc)
{
        int i;

        check_zero();

        mc_stats.issued++;
        mc_stats.hypercalls += mc->mcidx;
        mc_stats.arg_total += mc->argidx;

        mc_stats.histo[mc->mcidx]++;
        for (i = 0; i < mc->mcidx; i++) {
                unsigned op = mc->entries[i].op;
                if (op < NHYPERCALLS)
                        mc_stats.histo_hypercalls[op]++;
        }
}

static void mc_stats_flush(enum flush_reasons idx)
{
        check_zero();

        mc_stats.flush[idx]++;
}

#else  /* !CONFIG_XEN_DEBUG_FS */

static inline void mc_add_stats(const struct mc_buffer *mc)
{
}

static inline void mc_stats_flush(enum flush_reasons idx)
{
}
#endif	/* CONFIG_XEN_DEBUG_FS */

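/*
 * Flush this cpu's pending batch: issue all queued entries in a single
 * multicall hypercall, then run (and clear) any queued callbacks.  Any
 * entry that failed makes this BUG().  Must be called with preemption
 * disabled.
 */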
void xen_mc_flush(void)
{
        struct mc_buffer *b = &__get_cpu_var(mc_buffer);
        int ret = 0;
        unsigned long flags;
        int i;

        BUG_ON(preemptible());

        /* Disable interrupts in case someone comes in and queues
           something in the middle */
        local_irq_save(flags);

        mc_add_stats(b);

        if (b->mcidx) {
#if MC_DEBUG
                memcpy(b->debug, b->entries,
                       b->mcidx * sizeof(struct multicall_entry));
#endif

                if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
                        BUG();
                for (i = 0; i < b->mcidx; i++)
                        if (b->entries[i].result < 0)
                                ret++;

#if MC_DEBUG
                if (ret) {
                        printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
                               ret, smp_processor_id());
                        dump_stack();
                        for (i = 0; i < b->mcidx; i++) {
                                printk(KERN_DEBUG "  call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
                                       i+1, b->mcidx,
                                       b->debug[i].op,
                                       b->debug[i].args[0],
                                       b->entries[i].result);
                        }
                }
#endif

                b->mcidx = 0;
                b->argidx = 0;
        } else
                BUG_ON(b->argidx != 0);

        local_irq_restore(flags);

        for (i = 0; i < b->cbidx; i++) {
                struct callback *cb = &b->callbacks[i];

                (*cb->fn)(cb->data);
        }
        b->cbidx = 0;

        BUG_ON(ret);
}

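/*
 * Reserve a slot in this cpu's multicall buffer plus @args bytes of
 * argument space, flushing the batch first if either would overflow.
 * Callers normally reach this through the xen_mc_entry() wrapper in
 * multicalls.h.
 */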
struct multicall_space __xen_mc_entry(size_t args)
{
        struct mc_buffer *b = &__get_cpu_var(mc_buffer);
        struct multicall_space ret;
        unsigned argidx = roundup(b->argidx, sizeof(u64));

        BUG_ON(preemptible());
        BUG_ON(b->argidx > MC_ARGS);

        if (b->mcidx == MC_BATCH ||
            (argidx + args) > MC_ARGS) {
                mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
                xen_mc_flush();
                argidx = roundup(b->argidx, sizeof(u64));
        }

        ret.mc = &b->entries[b->mcidx];
        b->mcidx++;
        ret.args = &b->args[argidx];
        b->argidx = argidx + args;

        BUG_ON(b->argidx > MC_ARGS);
        return ret;
}

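/*
 * Try to extend the argument space of the most recently queued
 * multicall, provided it matches @op and there is room.  Returns
 * { NULL, NULL } if the extra @size bytes cannot be appended.
 */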
struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
        struct mc_buffer *b = &__get_cpu_var(mc_buffer);
        struct multicall_space ret = { NULL, NULL };

        BUG_ON(preemptible());
        BUG_ON(b->argidx > MC_ARGS);

        if (b->mcidx == 0)
                return ret;

        if (b->entries[b->mcidx - 1].op != op)
                return ret;

        if ((b->argidx + size) > MC_ARGS)
                return ret;

        ret.mc = &b->entries[b->mcidx - 1];
        ret.args = &b->args[b->argidx];
        b->argidx += size;

        BUG_ON(b->argidx > MC_ARGS);
        return ret;
}

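/*
 * Queue a callback to run on this cpu after the current batch has been
 * flushed.  The callback slots share MC_BATCH with the entry slots, so
 * a full callback array forces an early flush.
 */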
void xen_mc_callback(void (*fn)(void *), void *data)
{
        struct mc_buffer *b = &__get_cpu_var(mc_buffer);
        struct callback *cb;

        if (b->cbidx == MC_BATCH) {
                mc_stats_flush(FL_CALLBACKS);
                xen_mc_flush();
        }

        cb = &b->callbacks[b->cbidx++];
        cb->fn = fn;
        cb->data = data;
}

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mc_debug;

static int __init xen_mc_debugfs(void)
{
        struct dentry *d_xen = xen_init_debugfs();

        if (d_xen == NULL)
                return -ENOMEM;

        d_mc_debug = debugfs_create_dir("multicalls", d_xen);

        debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);

        debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
        debugfs_create_u32("hypercalls", 0444, d_mc_debug, &mc_stats.hypercalls);
        debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);

        xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
                                     mc_stats.histo, MC_BATCH);
        xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
                                     mc_stats.histo_hypercalls, NHYPERCALLS);
        xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
                                     mc_stats.flush, FL_N_REASONS);

        return 0;
}
fs_initcall(xen_mc_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */