]>
Commit | Line | Data |
---|---|---|
9e2369c0 RPM |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/errno.h> | |
3 | #include <linux/gfp.h> | |
4 | #include <linux/kernel.h> | |
5 | #include <linux/mm.h> | |
6 | #include <linux/memremap.h> | |
7 | #include <linux/slab.h> | |
8 | ||
9 | #include <asm/page.h> | |
10 | ||
11 | #include <xen/page.h> | |
12 | #include <xen/xen.h> | |
13 | ||
14 | static DEFINE_MUTEX(list_lock); | |
15 | static LIST_HEAD(page_list); | |
16 | static unsigned int list_count; | |
17 | ||
18 | static int fill_list(unsigned int nr_pages) | |
19 | { | |
20 | struct dev_pagemap *pgmap; | |
21 | void *vaddr; | |
22 | unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); | |
23 | int ret; | |
24 | ||
25 | pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL); | |
26 | if (!pgmap) | |
27 | return -ENOMEM; | |
28 | ||
29 | pgmap->type = MEMORY_DEVICE_GENERIC; | |
30 | pgmap->res.name = "Xen scratch"; | |
31 | pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; | |
32 | ||
33 | ret = allocate_resource(&iomem_resource, &pgmap->res, | |
34 | alloc_pages * PAGE_SIZE, 0, -1, | |
35 | PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); | |
36 | if (ret < 0) { | |
37 | pr_err("Cannot allocate new IOMEM resource\n"); | |
38 | kfree(pgmap); | |
39 | return ret; | |
40 | } | |
41 | ||
42 | #ifdef CONFIG_XEN_HAVE_PVMMU | |
43 | /* | |
44 | * memremap will build page tables for the new memory so | |
45 | * the p2m must contain invalid entries so the correct | |
46 | * non-present PTEs will be written. | |
47 | * | |
48 | * If a failure occurs, the original (identity) p2m entries | |
49 | * are not restored since this region is now known not to | |
50 | * conflict with any devices. | |
51 | */ | |
52 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | |
53 | xen_pfn_t pfn = PFN_DOWN(pgmap->res.start); | |
54 | ||
55 | for (i = 0; i < alloc_pages; i++) { | |
56 | if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { | |
57 | pr_warn("set_phys_to_machine() failed, no memory added\n"); | |
58 | release_resource(&pgmap->res); | |
59 | kfree(pgmap); | |
60 | return -ENOMEM; | |
61 | } | |
62 | } | |
63 | } | |
64 | #endif | |
65 | ||
66 | vaddr = memremap_pages(pgmap, NUMA_NO_NODE); | |
67 | if (IS_ERR(vaddr)) { | |
68 | pr_err("Cannot remap memory range\n"); | |
69 | release_resource(&pgmap->res); | |
70 | kfree(pgmap); | |
71 | return PTR_ERR(vaddr); | |
72 | } | |
73 | ||
74 | for (i = 0; i < alloc_pages; i++) { | |
75 | struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i); | |
76 | ||
77 | BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i)); | |
78 | list_add(&pg->lru, &page_list); | |
79 | list_count++; | |
80 | } | |
81 | ||
82 | return 0; | |
83 | } | |
84 | ||
85 | /** | |
86 | * xen_alloc_unpopulated_pages - alloc unpopulated pages | |
87 | * @nr_pages: Number of pages | |
88 | * @pages: pages returned | |
89 | * @return 0 on success, error otherwise | |
90 | */ | |
91 | int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages) | |
92 | { | |
93 | unsigned int i; | |
94 | int ret = 0; | |
95 | ||
96 | mutex_lock(&list_lock); | |
97 | if (list_count < nr_pages) { | |
98 | ret = fill_list(nr_pages - list_count); | |
99 | if (ret) | |
100 | goto out; | |
101 | } | |
102 | ||
103 | for (i = 0; i < nr_pages; i++) { | |
104 | struct page *pg = list_first_entry_or_null(&page_list, | |
105 | struct page, | |
106 | lru); | |
107 | ||
108 | BUG_ON(!pg); | |
109 | list_del(&pg->lru); | |
110 | list_count--; | |
111 | pages[i] = pg; | |
112 | ||
113 | #ifdef CONFIG_XEN_HAVE_PVMMU | |
114 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | |
115 | ret = xen_alloc_p2m_entry(page_to_pfn(pg)); | |
116 | if (ret < 0) { | |
117 | unsigned int j; | |
118 | ||
119 | for (j = 0; j <= i; j++) { | |
120 | list_add(&pages[j]->lru, &page_list); | |
121 | list_count++; | |
122 | } | |
123 | goto out; | |
124 | } | |
125 | } | |
126 | #endif | |
127 | } | |
128 | ||
129 | out: | |
130 | mutex_unlock(&list_lock); | |
131 | return ret; | |
132 | } | |
133 | EXPORT_SYMBOL(xen_alloc_unpopulated_pages); | |
134 | ||
135 | /** | |
136 | * xen_free_unpopulated_pages - return unpopulated pages | |
137 | * @nr_pages: Number of pages | |
138 | * @pages: pages to return | |
139 | */ | |
140 | void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages) | |
141 | { | |
142 | unsigned int i; | |
143 | ||
144 | mutex_lock(&list_lock); | |
145 | for (i = 0; i < nr_pages; i++) { | |
146 | list_add(&pages[i]->lru, &page_list); | |
147 | list_count++; | |
148 | } | |
149 | mutex_unlock(&list_lock); | |
150 | } | |
151 | EXPORT_SYMBOL(xen_free_unpopulated_pages); | |
152 | ||
#ifdef CONFIG_XEN_PV
/* Seed the pool at boot on PV domains; no-op elsewhere. */
static int __init init(void)
{
	unsigned int region;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (region = 0; region < XEN_EXTRA_MEM_MAX_REGIONS; region++) {
		unsigned int off;

		for (off = 0; off < xen_extra_mem[region].n_pfns; off++) {
			struct page *pg;

			pg = pfn_to_page(xen_extra_mem[region].start_pfn + off);
			list_add(&pg->lru, &page_list);
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif