/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a slot for the call and its
 * arguments, along with space for any data the arguments point to
 * (for passing pointers to structures, etc).  When the multicall is
 * actually issued, all the space for the commands and their argument
 * data is freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers gets full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back.  It will BUG if any of the multicalls fail.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
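/*
 * Typical usage, as a minimal sketch only (the op and argument values
 * below are placeholders, and the caller is assumed to already have
 * preemption disabled, e.g. via the helpers in multicalls.h):
 *
 *	struct multicall_space mcs = __xen_mc_entry(0);
 *
 *	mcs.mc->op = __HYPERVISOR_mmu_update;	(example hypercall number)
 *	mcs.mc->args[0] = ...;			(per-hypercall arguments)
 *
 *	xen_mc_flush();				(issue everything queued)
 */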
#include <linux/percpu.h>
#include <linux/hardirq.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"

#define MC_DEBUG 1

#define MC_BATCH	32
#define MC_ARGS		(MC_BATCH * 16)

struct mc_buffer {
	struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
	struct multicall_entry debug[MC_BATCH];
#endif
	unsigned char args[MC_ARGS];
	struct callback {
		void (*fn)(void *);
		void *data;
	} callbacks[MC_BATCH];
	unsigned mcidx, argidx, cbidx;
};

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

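/*
 * Issue all pending multicall entries on this CPU, then run any
 * registered callbacks.  Must be called with preemption disabled;
 * interrupts are masked while the batch is handed to the hypervisor
 * so nothing can queue new entries mid-flush.
 */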
void xen_mc_flush(void)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	int ret = 0;
	unsigned long flags;
	int i;

	BUG_ON(preemptible());

	/* Disable interrupts in case someone comes in and queues
	   something in the middle */
	local_irq_save(flags);

	if (b->mcidx) {
#if MC_DEBUG
		memcpy(b->debug, b->entries,
		       b->mcidx * sizeof(struct multicall_entry));
#endif

		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;

#if MC_DEBUG
		if (ret) {
			printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
			       ret, smp_processor_id());
			for (i = 0; i < b->mcidx; i++) {
				printk("  call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
				       i+1, b->mcidx,
				       b->debug[i].op,
				       b->debug[i].args[0],
				       b->entries[i].result);
			}
		}
#endif

		b->mcidx = 0;
		b->argidx = 0;
	} else
		BUG_ON(b->argidx != 0);

	local_irq_restore(flags);

	for (i = 0; i < b->cbidx; i++) {
		struct callback *cb = &b->callbacks[i];

		(*cb->fn)(cb->data);
	}
	b->cbidx = 0;

	BUG_ON(ret);
}

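/*
 * Reserve a multicall slot plus 'args' bytes of argument storage in
 * this CPU's buffer, flushing the batch first if either would
 * overflow.  The returned multicall_space points at the entry to fill
 * in and at the (u64-aligned) argument area.
 */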
struct multicall_space __xen_mc_entry(size_t args)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret;
	unsigned argidx = roundup(b->argidx, sizeof(u64));

	BUG_ON(preemptible());
	BUG_ON(b->argidx > MC_ARGS);

	if (b->mcidx == MC_BATCH ||
	    (argidx + args) > MC_ARGS) {
		xen_mc_flush();
		argidx = roundup(b->argidx, sizeof(u64));
	}

	ret.mc = &b->entries[b->mcidx];
	b->mcidx++;
	ret.args = &b->args[argidx];
	b->argidx = argidx + args;

	BUG_ON(b->argidx > MC_ARGS);
	return ret;
}

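/*
 * Try to append 'size' bytes of argument space to the most recently
 * queued multicall, provided the batch is not empty and that entry's
 * op matches 'op'.  Returns { NULL, NULL } if the batch is empty, the
 * op differs, or the argument buffer would overflow; the caller must
 * then queue a fresh entry instead.
 */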
struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret = { NULL, NULL };

	BUG_ON(preemptible());
	BUG_ON(b->argidx > MC_ARGS);

	if (b->mcidx == 0)
		return ret;

	if (b->entries[b->mcidx - 1].op != op)
		return ret;

	if ((b->argidx + size) > MC_ARGS)
		return ret;

	ret.mc = &b->entries[b->mcidx - 1];
	ret.args = &b->args[b->argidx];
	b->argidx += size;

	BUG_ON(b->argidx > MC_ARGS);
	return ret;
}

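/*
 * Register a callback to be run (with the given data pointer) after
 * the current batch has been flushed.  If the callback buffer is
 * already full, the batch is flushed immediately to make room.
 */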
void xen_mc_callback(void (*fn)(void *), void *data)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct callback *cb;

	if (b->cbidx == MC_BATCH)
		xen_mc_flush();

	cb = &b->callbacks[b->cbidx++];
	cb->fn = fn;
	cb->data = data;
}