arch/arm64/mm/pageattr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

struct page_change_data {
        pgprot_t set_mask;
        pgprot_t clear_mask;
};

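/*
 * rodata_full defaults to CONFIG_RODATA_FULL_DEFAULT_ENABLED and corresponds
 * to the rodata=full boot option. When it is set, the linear map is created
 * with page granularity, which is what allows the helpers below to also
 * adjust the linear alias of the pages backing a vmalloc'ed region.
 */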
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

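/*
 * Callback invoked by apply_to_page_range() for each PTE in the range:
 * clear the requested bits first, then set the new ones, and write the
 * result back with set_pte().
 */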
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
        struct page_change_data *cdata = data;
        pte_t pte = READ_ONCE(*ptep);

        pte = clear_pte_bit(pte, cdata->clear_mask);
        pte = set_pte_bit(pte, cdata->set_mask);

        set_pte(ptep, pte);
        return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        struct page_change_data data;
        int ret;

        data.set_mask = set_mask;
        data.clear_mask = clear_mask;

        ret = apply_to_page_range(&init_mm, start, size, change_page_range,
                                        &data);

        flush_tlb_kernel_range(start, start + size);
        return ret;
}

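/*
 * Illustrative sketch (not part of the original file): how a set/clear mask
 * pair maps onto a permission change. Making a page-aligned kernel VA range
 * read-only means setting the PTE_RDONLY hardware bit and clearing the
 * PTE_WRITE software bit, which is exactly what set_memory_ro() below asks
 * change_memory_common() to do. The helper name is hypothetical.
 */
static int __maybe_unused example_make_range_ro(unsigned long start,
                                                unsigned long size)
{
        return __change_memory_common(start, size,
                                      __pgprot(PTE_RDONLY),
                                      __pgprot(PTE_WRITE));
}
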
static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        unsigned long start = addr;
        unsigned long size = PAGE_SIZE * numpages;
        unsigned long end = start + size;
        struct vm_struct *area;
        int i;

        if (!PAGE_ALIGNED(addr)) {
                start &= PAGE_MASK;
                end = start + size;
                WARN_ON_ONCE(1);
        }

        /*
         * Kernel VA mappings are always live, and splitting live section
         * mappings into page mappings may cause TLB conflicts. This means
         * we have to ensure that changing the permission bits of the range
         * we are operating on does not result in such splitting.
         *
         * Let's restrict ourselves to mappings created by vmalloc (or vmap).
         * Those are guaranteed to consist entirely of page mappings, and
         * splitting is never needed.
         *
         * So check whether the [addr, addr + size) interval is entirely
         * covered by precisely one VM area that has the VM_ALLOC flag set.
         */
        area = find_vm_area((void *)addr);
        if (!area ||
            end > (unsigned long)area->addr + area->size ||
            !(area->flags & VM_ALLOC))
                return -EINVAL;

        if (!numpages)
                return 0;

        /*
         * If we are manipulating read-only permissions, apply the same
         * change to the linear mapping of the pages that back this VM area.
         */
        if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
                            pgprot_val(clear_mask) == PTE_RDONLY)) {
                for (i = 0; i < area->nr_pages; i++) {
                        __change_memory_common((u64)page_address(area->pages[i]),
                                               PAGE_SIZE, set_mask, clear_mask);
                }
        }

        /*
         * Get rid of potentially aliasing lazily unmapped vm areas that may
         * have permissions set that deviate from the ones we are setting here.
         */
        vm_unmap_aliases();

        return __change_memory_common(start, size, set_mask, clear_mask);
}

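/*
 * set_memory_ro()/set_memory_rw() adjust both the PTE_RDONLY hardware bit
 * (AP[2]) and the PTE_WRITE software bit, keeping the two views of
 * writability consistent when flipping a range between read-only and
 * read-write.
 */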
int set_memory_ro(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_RDONLY),
                                        __pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_WRITE),
                                        __pgprot(PTE_RDONLY));
}

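/*
 * set_memory_nx()/set_memory_x() toggle PTE_PXN (privileged execute-never).
 * PTE_MAYBE_GP expands to the Guarded Page bit used by BTI when the kernel
 * is built with CONFIG_ARM64_BTI_KERNEL, and to zero otherwise, so executable
 * kernel mappings are marked guarded only on BTI-enabled builds.
 */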
int set_memory_nx(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_PXN),
                                        __pgprot(PTE_MAYBE_GP));
}

int set_memory_x(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_MAYBE_GP),
                                        __pgprot(PTE_PXN));
}

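/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pattern for the helpers above, e.g. locking down a vmalloc'ed buffer that
 * holds generated code by making it read-only and executable. The function
 * name and buffer are hypothetical.
 */
static void __maybe_unused example_lock_down_text(void *text, int numpages)
{
        set_memory_ro((unsigned long)text, numpages);
        set_memory_x((unsigned long)text, numpages);
}
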
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
        if (enable)
                return __change_memory_common(addr, PAGE_SIZE * numpages,
                                        __pgprot(PTE_VALID),
                                        __pgprot(0));
        else
                return __change_memory_common(addr, PAGE_SIZE * numpages,
                                        __pgprot(0),
                                        __pgprot(PTE_VALID));
}

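/*
 * set_direct_map_invalid_noflush() and set_direct_map_default_noflush()
 * remove a single page from the linear (direct) map and restore its default
 * permissions, respectively. The "_noflush" suffix means TLB maintenance is
 * left to the caller. Both are no-ops unless debug_pagealloc or rodata=full
 * has forced the linear map down to page granularity.
 */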
int set_direct_map_invalid_noflush(struct page *page)
{
        struct page_change_data data = {
                .set_mask = __pgprot(0),
                .clear_mask = __pgprot(PTE_VALID),
        };

        if (!debug_pagealloc_enabled() && !rodata_full)
                return 0;

        return apply_to_page_range(&init_mm,
                                   (unsigned long)page_address(page),
                                   PAGE_SIZE, change_page_range, &data);
}

int set_direct_map_default_noflush(struct page *page)
{
        struct page_change_data data = {
                .set_mask = __pgprot(PTE_VALID | PTE_WRITE),
                .clear_mask = __pgprot(PTE_RDONLY),
        };

        if (!debug_pagealloc_enabled() && !rodata_full)
                return 0;

        return apply_to_page_range(&init_mm,
                                   (unsigned long)page_address(page),
                                   PAGE_SIZE, change_page_range, &data);
}

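/*
 * Illustrative sketch (not part of the original file): dropping a page from
 * the linear map and mapping it back. Because the helpers above do not flush
 * the TLB themselves, the caller issues the flush, here with
 * flush_tlb_kernel_range(). The function name and page are hypothetical.
 */
static void __maybe_unused example_direct_map_toggle(struct page *page)
{
        unsigned long addr = (unsigned long)page_address(page);

        set_direct_map_invalid_noflush(page);
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

        set_direct_map_default_noflush(page);
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
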
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (!debug_pagealloc_enabled() && !rodata_full)
                return;

        set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

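/*
 * With CONFIG_DEBUG_PAGEALLOC, the page allocator calls __kernel_map_pages()
 * above to clear PTE_VALID on freed pages and set it again on allocation, so
 * stray accesses through the linear map fault immediately instead of
 * silently corrupting freed memory.
 */
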
/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit. This is based
 * on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;
        pte_t *ptep;
        unsigned long addr = (unsigned long)page_address(page);

        if (!debug_pagealloc_enabled() && !rodata_full)
                return true;

        pgdp = pgd_offset_k(addr);
        if (pgd_none(READ_ONCE(*pgdp)))
                return false;

        p4dp = p4d_offset(pgdp, addr);
        if (p4d_none(READ_ONCE(*p4dp)))
                return false;

        pudp = pud_offset(p4dp, addr);
        pud = READ_ONCE(*pudp);
        if (pud_none(pud))
                return false;
        if (pud_sect(pud))
                return true;

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return false;
        if (pmd_sect(pmd))
                return true;

        ptep = pte_offset_kernel(pmdp, addr);
        return pte_valid(READ_ONCE(*ptep));
}
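
/*
 * kernel_page_present() is used by callers such as the hibernation code,
 * which must not read linear-map pages that debug_pagealloc or the direct-map
 * helpers above have unmapped; walking the tables level by level also lets it
 * answer "present" early for PUD- and PMD-level section mappings.
 */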