]>
Commit | Line | Data |
---|---|---|
f14f75b8 | 1 | /* |
e4a064df | 2 | * Copyright (C) 2001-2008 Silicon Graphics, Inc. All rights reserved. |
f14f75b8 JS |
3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of version 2 of the GNU General Public License | |
6 | * as published by the Free Software Foundation. | |
7 | * | |
8 | * A simple uncached page allocator using the generic allocator. This | |
9 | * allocator first utilizes the spare (spill) pages found in the EFI | |
10 | * memmap and will then start converting cached pages to uncached ones | |
11 | * at a granule at a time. Node awareness is implemented by having a | |
12 | * pool of pages per node. | |
13 | */ | |
14 | ||
15 | #include <linux/types.h> | |
16 | #include <linux/kernel.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/init.h> | |
19 | #include <linux/errno.h> | |
20 | #include <linux/string.h> | |
f14f75b8 JS |
21 | #include <linux/efi.h> |
22 | #include <linux/genalloc.h> | |
5a0e3ad6 | 23 | #include <linux/gfp.h> |
f14f75b8 JS |
24 | #include <asm/page.h> |
25 | #include <asm/pal.h> | |
26 | #include <asm/system.h> | |
27 | #include <asm/pgtable.h> | |
28 | #include <asm/atomic.h> | |
29 | #include <asm/tlbflush.h> | |
30 | #include <asm/sn/arch.h> | |
31 | ||
f14f75b8 | 32 | |
/* Boot-time walker over the EFI memmap's spare uncached regions. */
extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

/*
 * Per-node pool of uncached pages.  The pool is seeded from the EFI
 * memmap at init time and grown on demand by converting whole cached
 * granules to uncached (see uncached_add_chunk()).
 */
struct uncached_pool {
	struct gen_pool *pool;		/* backing generic allocator; NULL if node unused */
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* #of converted chunks added to pool */
	atomic_t status;		/* smp called function's return status*/
};

/* cap on how many cached granules may be converted per node */
#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];
f14f75b8 JS |
45 | |
46 | ||
47 | static void uncached_ipi_visibility(void *data) | |
48 | { | |
49 | int status; | |
eca7994f | 50 | struct uncached_pool *uc_pool = (struct uncached_pool *)data; |
f14f75b8 JS |
51 | |
52 | status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); | |
53 | if ((status != PAL_VISIBILITY_OK) && | |
54 | (status != PAL_VISIBILITY_OK_REMOTE_NEEDED)) | |
eca7994f | 55 | atomic_inc(&uc_pool->status); |
f14f75b8 JS |
56 | } |
57 | ||
58 | ||
59 | static void uncached_ipi_mc_drain(void *data) | |
60 | { | |
61 | int status; | |
eca7994f | 62 | struct uncached_pool *uc_pool = (struct uncached_pool *)data; |
929f9727 | 63 | |
f14f75b8 | 64 | status = ia64_pal_mc_drain(); |
eca7994f DN |
65 | if (status != PAL_STATUS_SUCCESS) |
66 | atomic_inc(&uc_pool->status); | |
f14f75b8 JS |
67 | } |
68 | ||
69 | ||
/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 *
 * Returns 0 if a chunk was added (by us, or concurrently by another
 * thread while we slept on the mutex), -1 on failure or interruption.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	/* snapshot the chunk count BEFORE taking the mutex, to detect
	   a concurrent add while we were blocked */
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	/* refuse to convert more than the per-node cap of granules */
	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_exact_node(nid,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	/* the uncached alias of the same physical granule */
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* make prefetch visibility physical on every CPU; remote CPUs
	   are reached via IPI when PAL says that is needed */
	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	/* drain machine-check state locally, then on all other CPUs */
	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}
174 | ||
175 | ||
/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, roundrobin starting with the next higher node.
 *
 * Returns the uncached virtual address of the first page, or 0 on
 * failure.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		/* NOTE: continue in a do/while jumps to the loop
		   condition, so nid still advances to the next node */
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		/* retry the allocation after each successful pool grow */
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);
217 | ||
218 | ||
219 | /* | |
220 | * uncached_free_page | |
221 | * | |
e4a064df DN |
222 | * @uc_addr: uncached address of first page to free |
223 | * @n_pages: number of contiguous pages to free | |
929f9727 | 224 | * |
e4a064df | 225 | * Free the specified number of uncached pages. |
f14f75b8 | 226 | */ |
e4a064df | 227 | void uncached_free_page(unsigned long uc_addr, int n_pages) |
f14f75b8 | 228 | { |
929f9727 | 229 | int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET); |
eca7994f | 230 | struct gen_pool *pool = uncached_pools[nid].pool; |
f14f75b8 | 231 | |
929f9727 DN |
232 | if (unlikely(pool == NULL)) |
233 | return; | |
f14f75b8 | 234 | |
929f9727 DN |
235 | if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET) |
236 | panic("uncached_free_page invalid address %lx\n", uc_addr); | |
f14f75b8 | 237 | |
e4a064df | 238 | gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE); |
f14f75b8 JS |
239 | } |
240 | EXPORT_SYMBOL(uncached_free_page); | |
241 | ||
242 | ||
243 | /* | |
244 | * uncached_build_memmap, | |
245 | * | |
929f9727 DN |
246 | * @uc_start: uncached starting address of a chunk of uncached memory |
247 | * @uc_end: uncached ending address of a chunk of uncached memory | |
248 | * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc()) | |
249 | * | |
f14f75b8 JS |
250 | * Called at boot time to build a map of pages that can be used for |
251 | * memory special operations. | |
252 | */ | |
e088a4ad | 253 | static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg) |
f14f75b8 | 254 | { |
929f9727 | 255 | int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET); |
eca7994f | 256 | struct gen_pool *pool = uncached_pools[nid].pool; |
929f9727 | 257 | size_t size = uc_end - uc_start; |
f14f75b8 | 258 | |
386d1d50 | 259 | touch_softlockup_watchdog(); |
f14f75b8 | 260 | |
929f9727 DN |
261 | if (pool != NULL) { |
262 | memset((char *)uc_start, 0, size); | |
263 | (void) gen_pool_add(pool, uc_start, size, nid); | |
f14f75b8 | 264 | } |
f14f75b8 JS |
265 | return 0; |
266 | } | |
267 | ||
268 | ||
929f9727 DN |
269 | static int __init uncached_init(void) |
270 | { | |
271 | int nid; | |
f14f75b8 | 272 | |
2dca53a9 | 273 | for_each_node_state(nid, N_ONLINE) { |
eca7994f DN |
274 | uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid); |
275 | mutex_init(&uncached_pools[nid].add_chunk_mutex); | |
f14f75b8 JS |
276 | } |
277 | ||
929f9727 | 278 | efi_memmap_walk_uc(uncached_build_memmap, NULL); |
f14f75b8 JS |
279 | return 0; |
280 | } | |
281 | ||
282 | __initcall(uncached_init); |