// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
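/*
 * Worked example (annotation, not part of the original file): assuming
 * SHMLBA == 16 KiB (four 4 KiB page colours, PAGE_SHIFT == 12):
 *
 *	COLOUR_ALIGN(0x5000, 3)
 *	    = ((0x5000 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *	    = 0x8000 + 0x3000 = 0xb000
 *
 * i.e. the hint is rounded up to an SHMLBA boundary and then offset by
 * the colour of page 3, so that page keeps the same cache colour in
 * every mapping.
 */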
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);
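	/*
	 * Note (annotation): file-backed mappings are coloured even
	 * without MAP_SHARED, since the same file page may also be
	 * mapped shared elsewhere, now or later; keeping every mapping
	 * of a page at one colour avoids D-cache aliasing between them.
	 */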

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}
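	/*
	 * Worked example for the check above (annotation): with
	 * SHMLBA == 16 KiB, a MAP_FIXED|MAP_SHARED request at
	 * addr == 0x1000 with pgoff == 0 is rejected, because
	 * (0x1000 - 0) & 0x3fff == 0x1000 != 0: the fixed address does
	 * not have the colour of object page 0.
	 */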

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
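	/*
	 * Annotation: PAGE_MASK & (SHMLBA - 1) keeps only the "colour"
	 * bits between PAGE_SHIFT and log2(SHMLBA), e.g. 0x3000 for
	 * 4 KiB pages and a 16 KiB SHMLBA. vm_unmapped_area() then
	 * returns an address whose colour bits match align_offset, the
	 * colour of the object page at pgoff.
	 */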
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
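
/*
 * Illustration (annotation, not part of the original file): the colour
 * alignment above is visible from userspace. A minimal sketch, assuming
 * an aliasing VIPT D-cache and SHMLBA == 16384 (both are assumptions;
 * on VIVT/PIPT systems no extra alignment is enforced):
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/tmp/colour-test", O_RDWR | O_CREAT, 0600);
 *		ftruncate(fd, 4096);
 *		// Two MAP_SHARED mappings of the same file offset: with
 *		// do_align in effect both get the colour of page 0, so
 *		// both moduli below print as 0.
 *		void *a = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *		void *b = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *		printf("%lu %lu\n", (unsigned long)a % 16384,
 *		       (unsigned long)b % 16384);
 *		return 0;
 *	}
 */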

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
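	/*
	 * Annotation: vm_unmapped_area() returns -ENOMEM on failure,
	 * which is never page aligned, so the (addr & ~PAGE_MASK) test
	 * below distinguishes an error from a successful allocation.
	 */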
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
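
/*
 * Worked example (annotation): assuming PHYS_OFFSET == 0x80000000 and
 * 512 MiB of lowmem, __pa(high_memory - 1) + 1 == 0xa0000000, so a
 * read() or write() on /dev/mem is accepted only if [addr, addr + size)
 * lies entirely within [0x80000000, 0xa0000000).
 */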

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
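
/*
 * Worked example (annotation): on a non-LPAE kernel PHYS_MASK is
 * 0xffffffff, so with 4 KiB pages the check reduces to
 * pfn + (size >> 12) <= 0x100000: a /dev/mem mapping must end at or
 * below the 4 GiB physical boundary. LPAE widens PHYS_MASK to 40 bits.
 */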