/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

/*
 * This maintains a list of RMAs (real mode areas) for KVM guests to use.
 * Each RMA has to be physically contiguous and of a size that the
 * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
 * and other larger sizes. Since we are unlikely to be able to allocate
 * that much physically contiguous memory after the system is up and
 * running, we preallocate a set of RMAs in early boot for KVM to use.
 */
static unsigned long kvm_rma_size = 64 << 20;	/* 64MB */
static unsigned long kvm_rma_count;

static int __init early_parse_rma_size(char *p)
{
	if (!p)
		return 1;

	kvm_rma_size = memparse(p, &p);

	return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);

static int __init early_parse_rma_count(char *p)
{
	if (!p)
		return 1;

	kvm_rma_count = simple_strtoul(p, NULL, 0);

	return 0;
}
early_param("kvm_rma_count", early_parse_rma_count);
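
/*
 * Example (hypothetical values): booting with
 *
 *	kvm_rma_size=128M kvm_rma_count=16
 *
 * on the kernel command line asks kvm_rma_init() below to preallocate
 * sixteen 128MB real mode areas.  memparse() accepts the usual K/M/G
 * suffixes for kvm_rma_size; kvm_rma_count is parsed as a plain integer.
 */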

static struct kvmppc_rma_info *rma_info;
static LIST_HEAD(free_rmas);
static DEFINE_SPINLOCK(rma_lock);

/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
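
/*
 * Worked example (values taken from the switch above): a 64MB RMA gives
 * lpcr_rmls(64ul << 20) == 3 and a 256MB RMA gives 4, while a size the
 * hardware cannot encode, e.g. 48MB, returns -1 and is rejected by
 * kvm_rma_init() below.
 */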

/*
 * Called at boot time while the bootmem allocator is active,
 * to allocate contiguous physical memory for the real memory
 * areas for guests.
 */
void kvm_rma_init(void)
{
	unsigned long i;
	unsigned long j, npages;
	void *rma;
	struct page *pg;

	/* Only do this on PPC970 in HV mode */
	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
	    !cpu_has_feature(CPU_FTR_ARCH_201))
		return;

	if (!kvm_rma_size || !kvm_rma_count)
		return;

	/* Check that the requested size is one supported in hardware */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return;
	}

	npages = kvm_rma_size >> PAGE_SHIFT;
	rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info));
	for (i = 0; i < kvm_rma_count; ++i) {
		rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size);
		pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma,
			kvm_rma_size >> 20);
		rma_info[i].base_virt = rma;
		rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT;
		rma_info[i].npages = npages;
		list_add_tail(&rma_info[i].list, &free_rmas);
		atomic_set(&rma_info[i].use_count, 0);

		pg = pfn_to_page(rma_info[i].base_pfn);
		for (j = 0; j < npages; ++j) {
			atomic_inc(&pg->_count);
			++pg;
		}
	}
}
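
/*
 * Sizing note (illustrative numbers, not from this file): with the
 * default kvm_rma_size of 64MB, booting with kvm_rma_count=16 reserves
 * 16 * 64MB = 1GB of physically contiguous, RMA-size-aligned bootmem,
 * and the reference count of every page in each RMA is raised so the
 * pages stay pinned for the life of the system.
 */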

struct kvmppc_rma_info *kvm_alloc_rma(void)
{
	struct kvmppc_rma_info *ri;

	ri = NULL;
	spin_lock(&rma_lock);
	if (!list_empty(&free_rmas)) {
		ri = list_first_entry(&free_rmas, struct kvmppc_rma_info, list);
		list_del(&ri->list);
		atomic_inc(&ri->use_count);
	}
	spin_unlock(&rma_lock);
	return ri;
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);

void kvm_release_rma(struct kvmppc_rma_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		spin_lock(&rma_lock);
		list_add_tail(&ri->list, &free_rmas);
		spin_unlock(&rma_lock);
	}
}
EXPORT_SYMBOL_GPL(kvm_release_rma);
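
/*
 * Minimal usage sketch for the two exports above; the error handling
 * shown is illustrative and not lifted from the in-tree callers:
 *
 *	struct kvmppc_rma_info *ri = kvm_alloc_rma();
 *
 *	if (!ri)
 *		return -ENOMEM;		(preallocated pool exhausted)
 *	...
 *	kvm_release_rma(ri);		(drops use_count; the RMA returns to
 *					 free_rmas when it reaches zero)
 *
 * kvm_alloc_rma() can return NULL even when RMAs were preallocated,
 * since all of them may already be handed out to other guests.
 */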