arch/ia64/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/* log2 of the configured huge page size; set from "hugepagesz=" at boot. */
unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);

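/*
 * Hugepages on ia64 are implemented with ordinary page tables over a
 * scaled-down address: htlbpage_to_page() divides the offset within the
 * hugetlb region by HPAGE_SIZE/PAGE_SIZE, so a single PTE at the scaled
 * address covers one huge page.  Allocate any missing intermediate
 * levels on the way down.
 */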
pte_t *
huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
	       unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	p4d = p4d_offset(pgd, taddr);
	pud = pud_alloc(mm, p4d, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

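/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the page tables at
 * the scaled address and return NULL if any level is not present.
 */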
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		p4d = p4d_offset(pgd, taddr);
		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, taddr);
			if (pud_present(*pud)) {
				pmd = pmd_offset(pud, taddr);
				if (pmd_present(*pmd))
					pte = pte_offset_map(pmd, taddr);
			}
		}
	}

	return pte;
}

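/* Mark a huge PTE present by setting the ia64 present bit, _PAGE_P. */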
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

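/*
 * Resolve the struct page backing a huge-page virtual address.  Fails
 * with -EINVAL outside the hugetlb region; the PAGE_SIZE-granular
 * offset within the huge page selects the exact subpage.
 */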
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
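
/*
 * Huge pages on ia64 live in their own region and are mapped with
 * normal-format page tables at a scaled address, so no PMD or PUD
 * entry is ever a huge-page leaf.
 */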
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

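/*
 * Pick an unmapped area for a hugetlb mapping: the result must lie in
 * the dedicated hugepage region (RGN_HPAGE) and be HPAGE_SIZE-aligned.
 * Unless MAP_FIXED pins the address, search bottom-up from the hint
 * (or the region base) with vm_unmapped_area().
 */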
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;

	info.flags = 0;
	info.length = len;
	info.low_limit = addr;
	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

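/*
 * Parse the "hugepagesz=" early boot parameter.  The requested size
 * must be a power of two that PAL reports as a supported TLB page
 * size, larger than the base page size, and small enough for the
 * page allocator (below the MAX_ORDER limit).
 */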
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * Shouldn't happen, but just in case: fall back to the
		 * classic Itanium mask of supported page sizes
		 * (4K through 256M).
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU already executed ia64_mmu_init() with
	 * HPAGE_SHIFT_DEFAULT; override the hugepage region register
	 * here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);
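
/*
 * Example: booting with "hugepagesz=256M" selects 256 MB huge pages,
 * provided PAL reports 256 MB as a supported TLB page size; an invalid
 * value is rejected with a warning and the HPAGE_SHIFT_DEFAULT size
 * stays in effect.
 */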