#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/interface/sched.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/system_misc.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>

#include <linux/mm.h>

struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
static struct vcpu_info __percpu *xen_vcpu_info;

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

static __read_mostly unsigned int xen_events_irq;

static __initdata struct device_node *xen_node;

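/*
 * ARM guests are auto-translated, so mapping foreign pages goes through
 * the generic xlate helpers.
 */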
int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *mfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid,
			       struct page **pages)
{
	return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
					 prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);

/* Not used by XENFEAT_auto_translated guests. */
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);

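/* Undo mappings that were set up through the xlate helpers above. */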
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	return xen_xlate_unmap_gfn_range(vma, nr, pages);
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

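/*
 * Register this CPU's vcpu_info structure with the hypervisor via
 * VCPUOP_register_vcpu_info and enable the per-cpu event channel IRQ.
 */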
static void xen_percpu_init(void)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup;
	int err;
	int cpu = get_cpu();

	pr_info("Xen: initializing cpu%d\n", cpu);
	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
	info.offset = offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	BUG_ON(err);
	per_cpu(xen_vcpu, cpu) = vcpup;

	enable_percpu_irq(xen_events_irq, 0);
	put_cpu();
}

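/*
 * Reboot and power-off are requested from the hypervisor via
 * SCHEDOP_shutdown hypercalls.
 */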
static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
	int rc;
	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	BUG_ON(rc);
}

static void xen_power_off(void)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
	int rc;
	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	BUG_ON(rc);
}

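/*
 * CPU hotplug notifier: run the Xen per-cpu setup on each CPU as it is
 * brought up (CPU_STARTING).
 */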
static int xen_cpu_notification(struct notifier_block *self,
				unsigned long action,
				void *hcpu)
{
	switch (action) {
	case CPU_STARTING:
		xen_percpu_init();
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block xen_cpu_notifier = {
	.notifier_call = xen_cpu_notification,
};

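/* Handler for the per-cpu event channel interrupt: run the Xen upcall. */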
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_hvm_evtchn_do_upcall();
	return IRQ_HANDLED;
}

/*
 * See Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
#define GRANT_TABLE_PHYSADDR 0
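/*
 * Probe the device tree for a Xen node, record the hypervisor version,
 * and set the domain type and the initial domain/privileged flags.
 */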
void __init xen_early_init(void)
{
	int len;
	const char *s = NULL;
	const char *version = NULL;
	const char *xen_prefix = "xen,xen-";

	xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!xen_node) {
		pr_debug("No Xen support\n");
		return;
	}
	s = of_get_property(xen_node, "compatible", &len);
	if (strlen(xen_prefix) + 3 < len &&
			!strncmp(xen_prefix, s, strlen(xen_prefix)))
		version = s + strlen(xen_prefix);
	if (version == NULL) {
		pr_debug("Xen version not found\n");
		return;
	}

	pr_info("Xen %s support found\n", version);

	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();

	if (xen_feature(XENFEAT_dom0))
		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
	else
		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);
}

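/*
 * Map the shared_info page, allocate the per-cpu vcpu_info area, set up
 * the grant table and xenbus, and request the event channel interrupt.
 */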
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	struct shared_info *shared_info_page = NULL;
	struct resource res;
	phys_addr_t grant_frames;

	if (!xen_domain())
		return 0;

	if (of_address_to_resource(xen_node, GRANT_TABLE_PHYSADDR, &res)) {
		pr_err("Xen grant table base address not found\n");
		return -ENODEV;
	}
	grant_frames = res.start;

	xen_events_irq = irq_of_parse_and_map(xen_node, 0);
	if (!xen_events_irq) {
		pr_err("Xen event channel interrupt not found\n");
		return -ENODEV;
	}

	shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL);

	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; we use it in the event channel upcall and in some pvclock
	 * related functions.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
				       sizeof(struct vcpu_info));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;

	if (gnttab_setup_auto_xlat_frames(grant_frames)) {
		free_percpu(xen_vcpu_info);
		return -ENOMEM;
	}
	gnttab_init();
	if (!xen_initial_domain())
		xenbus_probe(NULL);

	/*
	 * Make sure board specific code will not set up ops for
	 * cpu idle and cpu freq.
	 */
	disable_cpuidle();
	disable_cpufreq();

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", &xen_vcpu)) {
		pr_err("Error requesting IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	xen_percpu_init();

	register_cpu_notifier(&xen_cpu_notifier);

	return 0;
}
early_initcall(xen_guest_init);

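/* Route machine power-off and restart through the Xen hypercalls above. */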
static int __init xen_pm_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	pm_power_off = xen_power_off;
	arm_pm_restart = xen_restart;

	return 0;
}
late_initcall(xen_pm_init);


/* empty stubs */
void xen_arch_pre_suspend(void) { }
void xen_arch_post_suspend(int suspend_cancelled) { }
void xen_timer_resume(void) { }
void xen_arch_resume(void) { }
void xen_arch_suspend(void) { }


/* In the hypervisor.S file. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
EXPORT_SYMBOL_GPL(privcmd_call);