// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2008 Silicon Graphics, Inc. All rights reserved.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * one granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/efi.h>
#include <linux/nmi.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>


extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

struct uncached_pool {
	struct gen_pool *pool;
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* #of converted chunks added to pool */
	atomic_t status;		/* smp called function's return status */
};

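/*
 * At most two granules per node are ever converted from cached to
 * uncached; once this limit is hit uncached_add_chunk() fails and
 * uncached_alloc_page() moves on to the next node.
 */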
#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];


static void uncached_ipi_visibility(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		atomic_inc(&uc_pool->status);
}


static void uncached_ipi_mc_drain(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		atomic_inc(&uc_pool->status);
}

/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = __alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

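	/*
	 * c_addr is the granule's cached kernel identity mapping (the
	 * PAGE_OFFSET region); uc_addr aliases the same physical pages
	 * through the uncached identity mapping at __IA64_UNCACHED_OFFSET.
	 */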
	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

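	/*
	 * Same pattern as the visibility call above: drain outstanding
	 * memory operations locally with ia64_pal_mc_drain(), then on every
	 * other CPU via smp_call_function(); any nonzero status aborts the
	 * conversion.
	 */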
	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, round-robin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

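	/*
	 * Round-robin over the nodes starting at starting_nid.  On each
	 * node with a pool, keep retrying gen_pool_alloc(), converting one
	 * more granule with uncached_add_chunk() whenever the pool runs
	 * dry, until the allocation succeeds or the node's conversion
	 * limit is reached.
	 */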
	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);
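
/*
 * Illustrative usage (a sketch, not part of the original file): a caller
 * asks for whole pages and gets back an address in the uncached identity
 * mapping, or 0 on failure.  The physical address can be recovered the
 * same way uncached_free_page() does below.
 *
 *	unsigned long uc_addr = uncached_alloc_page(numa_node_id(), 1);
 *	if (uc_addr == 0)
 *		return -ENOMEM;
 *	paddr = uc_addr - __IA64_UNCACHED_OFFSET;
 */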


/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;

	if (unlikely(pool == NULL))
		return;

	if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);
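
/*
 * The matching free for the sketch above simply returns the pages to the
 * per-node gen_pool; a converted granule is never flipped back to cached:
 *
 *	uncached_free_page(uc_addr, 1);
 */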


/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;
	size_t size = uc_end - uc_start;

	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}


static int __init uncached_init(void)
{
	int nid;

	for_each_node_state(nid, N_ONLINE) {
		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
		mutex_init(&uncached_pools[nid].add_chunk_mutex);
	}

	efi_memmap_walk_uc(uncached_build_memmap, NULL);
	return 0;
}

__initcall(uncached_init);