/*
 * arch/s390/lib/uaccess_pt.c
 *
 * User access functions based on page table walks for enhanced
 * system layout without hardware support.
 *
 * Copyright IBM Corp. 2006
 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

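/*
 * Walk the page tables of @mm and return a pointer to the pte that maps
 * @addr, or NULL if no entry is present at the pgd, pud or pmd level.
 * The result is only stable while mm->page_table_lock is held, which
 * all callers in this file take care of.
 */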
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return NULL;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                return NULL;

        return pte_offset_map(pmd, addr);
}

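/*
 * Resolve a fault on a user address the slow way: look up the vma,
 * expand the stack if necessary, check the access rights and call
 * handle_mm_fault().  Returns 0 on success and -EFAULT if the fault
 * could not be resolved, or immediately if we are in atomic context
 * and must not sleep on mmap_sem.
 */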
static int __handle_fault(struct mm_struct *mm, unsigned long address,
                          int write_access)
{
        struct vm_area_struct *vma;
        int ret = -EFAULT;
        int fault;

        if (in_atomic())
                return ret;
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (unlikely(!vma))
                goto out;
        if (unlikely(vma->vm_start > address)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
                if (expand_stack(vma, address))
                        goto out;
        }

        if (!write_access) {
                /* page not present, check vm flags */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto out;
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto out;
        }

survive:
        fault = handle_mm_fault(mm, vma, address, write_access);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto out_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;
        ret = 0;
out:
        up_read(&mm->mmap_sem);
        return ret;

out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_global_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", current->comm);
        return ret;

out_sigbus:
        up_read(&mm->mmap_sem);
        current->thread.prot_addr = address;
        current->thread.trap_no = 0x11;
        force_sig(SIGBUS, current);
        return ret;
}

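/*
 * Copy @n bytes between a user address and a kernel buffer by walking
 * the page tables directly.  With @write_user set the copy goes from
 * @kptr to @uaddr, otherwise the other way round.  On a missing or
 * write-protected pte the page_table_lock is dropped, the fault is
 * handled and the walk retried.  Returns the number of bytes NOT copied.
 */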
static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                             size_t n, int write_user)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, size;
        pte_t *pte;
        void *from, *to;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte = follow_table(mm, uaddr);
                if (!pte || !pte_present(*pte) ||
                    (write_user && !pte_write(*pte)))
                        goto fault;

                pfn = pte_pfn(*pte);

                offset = uaddr & (PAGE_SIZE - 1);
                size = min(n - done, PAGE_SIZE - offset);
                if (write_user) {
                        to = (void *)((pfn << PAGE_SHIFT) + offset);
                        from = kptr + done;
                } else {
                        from = (void *)((pfn << PAGE_SHIFT) + offset);
                        to = kptr + done;
                }
                memcpy(to, from, size);
                done += size;
                uaddr += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, write_user))
                return n - done;
        goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static unsigned long __dat_user_addr(unsigned long uaddr)
{
        struct mm_struct *mm = current->mm;
        unsigned long pfn, ret;
        pte_t *pte;
        int rc;

        ret = 0;
retry:
        pte = follow_table(mm, uaddr);
        if (!pte || !pte_present(*pte))
                goto fault;

        pfn = pte_pfn(*pte);
        ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
out:
        return ret;
fault:
        spin_unlock(&mm->page_table_lock);
        rc = __handle_fault(mm, uaddr, 0);
        spin_lock(&mm->page_table_lock);
        if (rc)
                goto out;
        goto retry;
}

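/*
 * copy_from_user for the page table walk path.  Bytes that could not
 * be copied are zero-filled at the end of the destination buffer, in
 * line with the usual copy_from_user() contract.
 */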
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
        size_t rc;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy(to, (void __kernel __force *) from, n);
                return 0;
        }
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
        return rc;
}

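/*
 * copy_to_user for the page table walk path.  Returns the number of
 * bytes that could not be copied.
 */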
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy((void __kernel __force *) to, from, n);
                return 0;
        }
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

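/*
 * Clear @n bytes of user memory by copying from empty_zero_page one
 * page-sized chunk at a time.  Returns the number of bytes not cleared.
 */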
static size_t clear_user_pt(size_t n, void __user *to)
{
        long done, size, ret;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memset((void __kernel __force *) to, 0, n);
                return 0;
        }
        done = 0;
        do {
                if (n - done > PAGE_SIZE)
                        size = PAGE_SIZE;
                else
                        size = n - done;
                ret = __user_copy_pt((unsigned long) to + done,
                                     &empty_zero_page, size, 1);
                done += size;
                if (ret)
                        return ret + n - done;
        } while (done < n);
        return 0;
}

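/*
 * Determine the length of a user space string including the
 * terminating '\0', scanning at most @count bytes.  The string is
 * examined page by page under the page_table_lock; an unresolvable
 * fault makes the function return 0.
 */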
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
        char *addr;
        unsigned long uaddr = (unsigned long) src;
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, len;
        pte_t *pte;
        size_t len_str;

        if (segment_eq(get_fs(), KERNEL_DS))
                return strnlen((const char __kernel __force *) src, count) + 1;
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte = follow_table(mm, uaddr);
                if (!pte || !pte_present(*pte))
                        goto fault;

                pfn = pte_pfn(*pte);
                offset = uaddr & (PAGE_SIZE - 1);
                addr = (char *)(pfn << PAGE_SHIFT) + offset;
                len = min(count - done, PAGE_SIZE - offset);
                len_str = strnlen(addr, len);
                done += len_str;
                uaddr += len_str;
        } while ((len_str == len) && (done < count));
        spin_unlock(&mm->page_table_lock);
        return done + 1;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, 0))
                return 0;
        goto retry;
}

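/*
 * Copy a string from user space into @dst, at most @count bytes.
 * Returns the length of the copied string (without the terminating
 * '\0' if one was found within @count bytes) or -EFAULT on fault.
 */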
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
                                   char *dst)
{
        size_t n = strnlen_user_pt(count, src);

        if (!n)
                return -EFAULT;
        if (n > count)
                n = count;
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy(dst, (const char __kernel __force *) src, n);
                if (dst[n-1] == '\0')
                        return n-1;
                else
                        return n;
        }
        if (__user_copy_pt((unsigned long) src, dst, n, 0))
                return -EFAULT;
        if (dst[n-1] == '\0')
                return n-1;
        else
                return n;
}

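/*
 * Copy @n bytes from one user address to another.  Source and
 * destination are translated page by page, so each chunk is bounded
 * by whichever of the two offsets is closer to its page boundary.
 * Returns the number of bytes not copied.
 */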
static size_t copy_in_user_pt(size_t n, void __user *to,
                              const void __user *from)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
                      uaddr, done, size;
        unsigned long uaddr_from = (unsigned long) from;
        unsigned long uaddr_to = (unsigned long) to;
        pte_t *pte_from, *pte_to;
        int write_user;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy((void __force *) to, (void __force *) from, n);
                return 0;
        }
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte_from = follow_table(mm, uaddr_from);
                if (!pte_from || !pte_present(*pte_from)) {
                        uaddr = uaddr_from;
                        write_user = 0;
                        goto fault;
                }

                pte_to = follow_table(mm, uaddr_to);
                if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
                        uaddr = uaddr_to;
                        write_user = 1;
                        goto fault;
                }

                pfn_from = pte_pfn(*pte_from);
                pfn_to = pte_pfn(*pte_to);
                offset_from = uaddr_from & (PAGE_SIZE - 1);
                offset_to = uaddr_to & (PAGE_SIZE - 1);
                offset_max = max(offset_from, offset_to);
                size = min(n - done, PAGE_SIZE - offset_max);

                memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
                       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
                done += size;
                uaddr_from += size;
                uaddr_to += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, write_user))
                return n - done;
        goto retry;
}

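/*
 * Implement a futex operation as a compare-and-swap loop: load the old
 * value, compute the new value with @insn and retry the cs until it
 * succeeds.  The EX_TABLE entries route any faulting access to label 4,
 * leaving the preset -EFAULT in @ret.
 */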
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)     \
        asm volatile("0: l %1,0(%6)\n"                                  \
                     "1: " insn                                         \
                     "2: cs %1,%2,0(%6)\n"                              \
                     "3: jl 1b\n"                                       \
                     "   lhi %0,0\n"                                    \
                     "4:\n"                                             \
                     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)    \
                     : "=d" (ret), "=&d" (oldval), "=&d" (newval),      \
                       "=m" (*uaddr)                                    \
                     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),         \
                       "m" (*uaddr) : "cc" );

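/*
 * Dispatch a futex op to the matching inline assembly sequence.  @uaddr
 * must already be usable as a kernel address here, i.e. either we run
 * with KERNEL_DS or the caller has translated it via __dat_user_addr().
 */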
static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
        int oldval = 0, newval, ret;

        switch (op) {
        case FUTEX_OP_SET:
                __futex_atomic_op("lr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
                __futex_atomic_op("lr %2,%1\nar %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_OR:
                __futex_atomic_op("lr %2,%1\nor %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
                /* nr is a plain AND, so pass the complement of oparg */
                __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
                                  ret, oldval, newval, uaddr, ~oparg);
                break;
        case FUTEX_OP_XOR:
                __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
        }
        if (ret == 0)
                *old = oldval;
        return ret;
}

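/*
 * Futex operation on a user address.  Outside of KERNEL_DS the address
 * is translated with the page_table_lock held and the backing page is
 * pinned with get_page() so it cannot go away while the atomic op runs
 * without the lock.
 */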
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_op_pt(op, uaddr, oparg, old);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
        put_page(virt_to_page(uaddr));
        return ret;
}

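/*
 * Compare-and-swap on a futex word: if *uaddr equals oldval replace it
 * with newval, and return the value found at *uaddr.  A fault inside
 * the cs yields -EFAULT via the exception table.
 */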
static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
        int ret;

        asm volatile("0: cs %1,%4,0(%5)\n"
                     "1: lr %0,%1\n"
                     "2:\n"
                     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
                     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                     : "cc", "memory" );
        return ret;
}

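/*
 * cmpxchg on a user futex word.  Same translate-and-pin sequence as
 * futex_atomic_op_pt() when running with a user address space.
 */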
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
        put_page(virt_to_page(uaddr));
        return ret;
}

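/*
 * Operations vector used when user copy cannot rely on hardware
 * support; every primitive goes through the page table walk variants
 * above.
 */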
struct uaccess_ops uaccess_pt = {
        .copy_from_user = copy_from_user_pt,
        .copy_from_user_small = copy_from_user_pt,
        .copy_to_user = copy_to_user_pt,
        .copy_to_user_small = copy_to_user_pt,
        .copy_in_user = copy_in_user_pt,
        .clear_user = clear_user_pt,
        .strnlen_user = strnlen_user_pt,
        .strncpy_from_user = strncpy_from_user_pt,
        .futex_atomic_op = futex_atomic_op_pt,
        .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};