/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

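/*
 * Shift for the hugepage size in use; tunable at boot via the
 * "hugepagesz=" parameter handled by hugetlb_setup_sz() below.
 */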
unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;

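/*
 * Allocate the pgd/pud/pmd/pte chain for a hugepage mapping.  The
 * address is first scaled down by htlbpage_to_page() so that the
 * standard page-table walk covers the hugetlb region at PAGE_SIZE
 * granularity.
 */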
pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

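/*
 * Look up the pte for a hugepage address without allocating anything;
 * returns NULL if any level of the walk is not present.
 */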
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}

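/* Mark a hugepage pte present by setting its _PAGE_P bit. */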
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * This function checks for proper alignment of input addr and len
 * parameters.  On ia64 a hugepage mapping must additionally lie in the
 * dedicated REGION_HPAGE region.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != REGION_HPAGE)
		return -EINVAL;

	return 0;
}

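/*
 * Translate a user address in the hugepage region to its backing
 * struct page: ERR_PTR(-EINVAL) for addresses outside REGION_HPAGE,
 * NULL when no hugepage pte is present.
 */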
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != REGION_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
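	/* Index to the PAGE_SIZE subpage that backs @addr. */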
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
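
/*
 * ia64 implements hugepages with a separate address region and full
 * page-table levels rather than huge pmd entries, so the generic
 * huge-pmd hooks are stubs here.
 */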
int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called only when is_hugepage_only_range(addr, ...),
	 * and it follows that is_hugepage_only_range(end, ...) holds too.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
		floor = htlbpage_to_page(floor);
	if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

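/*
 * Pick an HPAGE_SIZE aligned, unmapped range inside the hugepage
 * region.  A hint that lies outside REGION_HPAGE or is not hugepage
 * aligned is discarded and the search restarts at the region base.
 */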
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	/* This code assumes that REGION_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
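		/* Ran off the end of the mappable region: no room left. */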
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}

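/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must be
 * a power of two that the hardware can insert into the TLB (per the
 * PAL_VM_PAGE_SIZE mask), larger than the base page size, and small
 * enough for the buddy allocator (below the MAX_ORDER limit).
 */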
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || (size & (size-1)) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot cpu already executed ia64_mmu_init and programmed the
	 * hugepage region with HPAGE_SHIFT_DEFAULT; override that here
	 * with the new page shift (the region register's preferred page
	 * size field starts at bit 2, hence the << 2).
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 1;
}
__setup("hugepagesz=", hugetlb_setup_sz);