/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#include "book3s_hv_cma.h"
/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash page table allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;
/*
 * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
 * Each RMA has to be physically contiguous and of a size that the
 * hardware supports.  PPC970 and POWER7 support 64MB, 128MB and 256MB,
 * and other larger sizes.  Since we are unlikely to be able to allocate
 * that much physically contiguous memory after the system is up and
 * running, we preallocate a set of RMAs in early boot using CMA.
 */
unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
EXPORT_SYMBOL_GPL(kvm_rma_pages);
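/*
 * For illustration: the 128MB default is (1 << 27) bytes, i.e. 32768
 * pages with 4KB pages (PAGE_SHIFT = 12) or 2048 pages with 64KB pages
 * (PAGE_SHIFT = 16).
 */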

/*
 * Work out the RMLS (real mode limit selector) field value for a given
 * RMA size.  Assumes POWER7 or PPC970.  Returns -1 if the size is not
 * supported by the hardware.
 */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
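
/*
 * lpcr_rmls() is used below to validate the kvm_rma_size boot parameter:
 * with the default 128MB RMA it returns 7, and a negative return value
 * means the requested size is not supported by the hardware.
 */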

static int __init early_parse_rma_size(char *p)
{
	unsigned long kvm_rma_size;

	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	kvm_rma_size = memparse(p, &p);
	/*
	 * Check that the requested size is one supported in hardware.
	 */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return -EINVAL;
	}
	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
	return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);
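/*
 * For example, booting with "kvm_rma_size=256M" on the kernel command
 * line requests 256MB RMAs (memparse() understands suffixes such as
 * K, M and G).
 */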

/*
 * Allocate an RMA from the preallocated KVM CMA area.  The returned
 * kvm_rma_info starts with a use count of 1.
 */
struct kvm_rma_info *kvm_alloc_rma(void)
{
	struct page *page;
	struct kvm_rma_info *ri;

	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
	if (!ri)
		return NULL;
	page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
	if (!page)
		goto err_out;
	atomic_set(&ri->use_count, 1);
	ri->base_pfn = page_to_pfn(page);
	return ri;
 err_out:
	kfree(ri);
	return NULL;
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);
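/*
 * kvm_alloc_rma() and kvm_release_rma() pair up through use_count: the
 * RMA is returned to the CMA area only when the last reference is dropped.
 */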

void kvm_release_rma(struct kvm_rma_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
		kfree(ri);
	}
}
EXPORT_SYMBOL_GPL(kvm_release_rma);

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
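/*
 * For example, booting with "kvm_cma_resv_ratio=10" reserves 10% of
 * memory for the hash page table area instead of the default 5%.
 */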

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

	/* Old CPUs require the HPT to be aligned on a multiple of its size */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;
	return kvm_alloc_cma(nr_pages, align_pages);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
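/*
 * For instance, a (hypothetical) 16MB HPT allocated here must be
 * 16MB-aligned on PPC970, while 256KB alignment (HPT_ALIGN_PAGES)
 * suffices on POWER7 and later (CPU_FTR_ARCH_206).
 */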

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	kvm_release_cma(page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator.  It should be
 * called by arch-specific code once the early allocator (memblock or
 * bootmem) has been activated and all other subsystems have already
 * allocated/reserved memory.
 */
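/*
 * For illustration: with 32GB of RAM and the default 5% ratio, about
 * 1.6GB is reserved; the alignment is at least the RMA size (128MB by
 * default).
 */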
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		/*
		 * Old CPUs require the HPT to be aligned on a multiple of its
		 * size, so for them make the alignment the maximum size we
		 * could request.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			align_size = __rounddown_pow_of_two(selected_size);
		else
			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
		kvm_cma_declare_contiguous(selected_size, align_size);
	}
}