// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uio.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/mte-kasan.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
        pte_t old_pte = READ_ONCE(*ptep);

        if (check_swap && is_swap_pte(old_pte)) {
                swp_entry_t entry = pte_to_swp_entry(old_pte);

                if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
                        return;
        }

        page_kasan_tag_reset(page);
        /*
         * We need smp_wmb() between setting the flags and clearing the
         * tags: if another thread reads page->flags and builds a tagged
         * address out of it, there is an address dependency on the memory
         * access, but on the current thread we do not guarantee that the
         * new page->flags are visible before the tags are updated.
         */
        smp_wmb();
        mte_clear_page_tags(page_address(page));
}

void mte_sync_tags(pte_t *ptep, pte_t pte)
{
        struct page *page = pte_page(pte);
        long i, nr_pages = compound_nr(page);
        bool check_swap = nr_pages == 1;

        /* if PG_mte_tagged is set, tags have already been initialised */
        for (i = 0; i < nr_pages; i++, page++) {
                if (!test_and_set_bit(PG_mte_tagged, &page->flags))
                        mte_sync_page_tags(page, ptep, check_swap);
        }
}

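/*
 * Illustrative sketch (not part of this file): the caller side lives in
 * set_pte_at() in <asm/pgtable.h>, which looks roughly like this, so the
 * tags are synchronised whenever a tagged (PROT_MTE) pte is installed:
 *
 *	if (system_supports_mte() && pte_present(pte) && pte_tagged(pte))
 *		mte_sync_tags(ptep, pte);
 */
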
int memcmp_pages(struct page *page1, struct page *page2)
{
        char *addr1, *addr2;
        int ret;

        addr1 = page_address(page1);
        addr2 = page_address(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);

        if (!system_supports_mte() || ret)
                return ret;

        /*
         * If the page content is identical but at least one of the pages is
         * tagged, return non-zero to avoid KSM merging. If only one of the
         * pages is tagged, set_pte_at() may zero or change the tags of the
         * other page via mte_sync_tags().
         */
        if (test_bit(PG_mte_tagged, &page1->flags) ||
            test_bit(PG_mte_tagged, &page2->flags))
                return addr1 != addr2;

        return ret;
}

u8 mte_get_mem_tag(void *addr)
{
        if (!system_supports_mte())
                return 0xFF;

        asm(__MTE_PREAMBLE "ldg %0, [%0]"
            : "+r" (addr));

        return mte_get_ptr_tag(addr);
}

u8 mte_get_random_tag(void)
{
        void *addr;

        if (!system_supports_mte())
                return 0xFF;

        asm(__MTE_PREAMBLE "irg %0, %0"
            : "+r" (addr));

        return mte_get_ptr_tag(addr);
}

void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
{
        void *ptr = addr;

        if ((!system_supports_mte()) || (size == 0))
                return addr;

        /* Make sure that size is MTE granule aligned. */
        WARN_ON(size & (MTE_GRANULE_SIZE - 1));

        /* Make sure that the address is MTE granule aligned. */
        WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));

        tag = 0xF0 | tag;
        ptr = (void *)__tag_set(ptr, tag);

        mte_assign_mem_tag_range(ptr, size);

        return ptr;
}

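/*
 * Illustrative sketch (not part of this file): how a hardware tag-based
 * KASAN backend might tag a fresh allocation with the helpers above.
 * 'example_tag_allocation' is a hypothetical name; 'size' must be
 * MTE-granule aligned, per the WARN_ON()s in mte_set_mem_tag_range().
 */
static inline void *example_tag_allocation(void *obj, size_t size)
{
        u8 tag = mte_get_random_tag();

        /* returns 'obj' with 'tag' in its top byte and the granules tagged */
        return mte_set_mem_tag_range(obj, size, tag);
}
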
void mte_enable_kernel(void)
{
        /* Enable MTE Sync Mode for EL1. */
        sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
        isb();
}

static void update_sctlr_el1_tcf0(u64 tcf0)
{
        /* ISB required for the kernel uaccess routines */
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
        isb();
}

static void set_sctlr_el1_tcf0(u64 tcf0)
{
        /*
         * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
         * optimisation. Disable preemption so that it does not see
         * the variable update before the SCTLR_EL1.TCF0 one.
         */
        preempt_disable();
        current->thread.sctlr_tcf0 = tcf0;
        update_sctlr_el1_tcf0(tcf0);
        preempt_enable();
}

static void update_gcr_el1_excl(u64 incl)
{
        u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;

        /*
         * Note that 'incl' is an include mask (controlled by the user via
         * prctl()) while GCR_EL1 accepts an exclude mask.
         * No need for ISB since this only affects EL0 currently, implicit
         * with ERET.
         */
        sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
}

static void set_gcr_el1_excl(u64 incl)
{
        current->thread.gcr_user_incl = incl;
        update_gcr_el1_excl(incl);
}

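/*
 * Worked example (illustrative, assuming SYS_GCR_EL1_EXCL_MASK covers all
 * 16 tags): a user include mask of 0x3 allows IRG to generate tags 0 and 1
 * only; it is converted to the exclude mask ~0x3 & 0xffff = 0xfffc that is
 * written to GCR_EL1.Exclude.
 */
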
void flush_mte_state(void)
{
        if (!system_supports_mte())
                return;

        /* clear any pending asynchronous tag fault */
        dsb(ish);
        write_sysreg_s(0, SYS_TFSRE0_EL1);
        clear_thread_flag(TIF_MTE_ASYNC_FAULT);
        /* disable tag checking */
        set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
        /* reset tag generation mask */
        set_gcr_el1_excl(0);
}

void mte_thread_switch(struct task_struct *next)
{
        if (!system_supports_mte())
                return;

        /* avoid expensive SCTLR_EL1 accesses if no change */
        if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
                update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
        update_gcr_el1_excl(next->thread.gcr_user_incl);
}

void mte_suspend_exit(void)
{
        if (!system_supports_mte())
                return;

        update_gcr_el1_excl(current->thread.gcr_user_incl);
}

long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
        u64 tcf0;
        u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT;

        if (!system_supports_mte())
                return 0;

        switch (arg & PR_MTE_TCF_MASK) {
        case PR_MTE_TCF_NONE:
                tcf0 = SCTLR_EL1_TCF0_NONE;
                break;
        case PR_MTE_TCF_SYNC:
                tcf0 = SCTLR_EL1_TCF0_SYNC;
                break;
        case PR_MTE_TCF_ASYNC:
                tcf0 = SCTLR_EL1_TCF0_ASYNC;
                break;
        default:
                return -EINVAL;
        }

        if (task != current) {
                task->thread.sctlr_tcf0 = tcf0;
                task->thread.gcr_user_incl = gcr_incl;
        } else {
                set_sctlr_el1_tcf0(tcf0);
                set_gcr_el1_excl(gcr_incl);
        }

        return 0;
}

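/*
 * Illustrative userspace sketch (not part of this file): set_mte_ctrl() is
 * reached via the tagged address ABI prctl, e.g. to enable synchronous tag
 * checking with tags 0 and 1 included in random tag generation:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *	      (0x3UL << PR_MTE_TAG_SHIFT),
 *	      0, 0, 0);
 */
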
long get_mte_ctrl(struct task_struct *task)
{
        unsigned long ret;

        if (!system_supports_mte())
                return 0;

        ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;

        switch (task->thread.sctlr_tcf0) {
        case SCTLR_EL1_TCF0_NONE:
                ret |= PR_MTE_TCF_NONE;
                break;
        case SCTLR_EL1_TCF0_SYNC:
                ret |= PR_MTE_TCF_SYNC;
                break;
        case SCTLR_EL1_TCF0_ASYNC:
                ret |= PR_MTE_TCF_ASYNC;
                break;
        }

        return ret;
}

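/*
 * Illustrative userspace sketch (not part of this file): the current mode
 * and include mask come back through the matching "get" prctl:
 *
 *	long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
 *	// ctrl & PR_MTE_TCF_MASK is the check mode, e.g. PR_MTE_TCF_SYNC
 */
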
/*
 * Access MTE tags in another process' address space as given in mm. Update
 * kiov->iov_len with the number of tags copied. Return 0 if any tags were
 * copied, an error otherwise. Inspired by __access_remote_vm().
 */
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
                                struct iovec *kiov, unsigned int gup_flags)
{
        struct vm_area_struct *vma;
        void __user *buf = kiov->iov_base;
        size_t len = kiov->iov_len;
        int ret;
        int write = gup_flags & FOLL_WRITE;

        if (!access_ok(buf, len))
                return -EFAULT;

        if (mmap_read_lock_killable(mm))
                return -EIO;

        while (len) {
                unsigned long tags, offset;
                void *maddr;
                struct page *page = NULL;

                ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
                                            &vma, NULL);
                if (ret <= 0)
                        break;

                /*
                 * Only copy tags if the page has been mapped as PROT_MTE
                 * (PG_mte_tagged set). Otherwise the tags are not valid and
                 * not accessible to user. Moreover, an mprotect(PROT_MTE)
                 * would cause the existing tags to be cleared if the page
                 * was never mapped with PROT_MTE.
                 */
                if (!test_bit(PG_mte_tagged, &page->flags)) {
                        ret = -EOPNOTSUPP;
                        put_page(page);
                        break;
                }

                /* limit access to the end of the page */
                offset = offset_in_page(addr);
                tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);

                maddr = page_address(page);
                if (write) {
                        tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
                        set_page_dirty_lock(page);
                } else {
                        tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
                }
                put_page(page);

                /* error accessing the tracer's buffer */
                if (!tags)
                        break;

                len -= tags;
                buf += tags;
                addr += tags * MTE_GRANULE_SIZE;
        }
        mmap_read_unlock(mm);

        /* return an error if no tags copied */
        kiov->iov_len = buf - kiov->iov_base;
        if (!kiov->iov_len) {
                /* check for error accessing the tracee's address space */
                if (ret <= 0)
                        return -EIO;
                else
                        return -EFAULT;
        }

        return 0;
}

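/*
 * Note on units (illustrative): one tag covers one MTE_GRANULE_SIZE (16
 * byte) granule, so 'tags' above counts granules rather than bytes; a 4K
 * page holds at most 4096 / 16 = 256 tags, and 'addr' advances by
 * tags * MTE_GRANULE_SIZE bytes per iteration while 'buf' advances by one
 * byte per tag.
 */
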
/*
 * Copy MTE tags in another process' address space at 'addr' to/from tracer's
 * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
 */
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
                              struct iovec *kiov, unsigned int gup_flags)
{
        struct mm_struct *mm;
        int ret;

        mm = get_task_mm(tsk);
        if (!mm)
                return -EPERM;

        if (!tsk->ptrace || (current != tsk->parent) ||
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
             !ptracer_capable(tsk, mm->user_ns))) {
                mmput(mm);
                return -EPERM;
        }

        ret = __access_remote_tags(mm, addr, kiov, gup_flags);
        mmput(mm);

        return ret;
}

int mte_ptrace_copy_tags(struct task_struct *child, long request,
                         unsigned long addr, unsigned long data)
{
        int ret;
        struct iovec kiov;
        struct iovec __user *uiov = (void __user *)data;
        unsigned int gup_flags = FOLL_FORCE;

        if (!system_supports_mte())
                return -EIO;

        if (get_user(kiov.iov_base, &uiov->iov_base) ||
            get_user(kiov.iov_len, &uiov->iov_len))
                return -EFAULT;

        if (request == PTRACE_POKEMTETAGS)
                gup_flags |= FOLL_WRITE;

        /* align addr to the MTE tag granule */
        addr &= MTE_GRANULE_MASK;

        ret = access_remote_tags(child, addr, &kiov, gup_flags);
        if (!ret)
                ret = put_user(kiov.iov_len, &uiov->iov_len);

        return ret;
}
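
/*
 * Illustrative tracer-side sketch (not part of this file): a tracer reads
 * the tags of 16 granules starting at 'addr' in the tracee like this; on
 * success, iov.iov_len holds the number of tags actually copied:
 *
 *	uint8_t tags[16];
 *	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *
 *	ptrace(PTRACE_PEEKMTETAGS, pid, (void *)addr, &iov);
 */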