/*
 * This file contains the routines for setting up the Linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
 *
 * Derived from arch/ppc/mm/pgtable.c:
 *   -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm_types.h>
#include <linux/io.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>

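/*
 * ioremap_base is the top of the early, pre-vmalloc ioremap region and
 * ioremap_bot records how far down from it mappings have been handed
 * out; see the address-choice comment in __ioremap() below.
 */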
unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;
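	/*
	 * Worked example (assuming 4 KiB pages): for addr = 0x84000804 and
	 * size = 0x100, p becomes 0x84000000 and size becomes
	 * PAGE_ALIGN(0x84000904) - p = 0x1000, i.e. the single page that
	 * contains the requested range.
	 */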

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD. Note that as written the
	 * exclusion below compares against __bss_stop on both sides, so
	 * it is currently a no-op.
	 */
	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 * -- Cort
	 */
	if (mem_init_done) {
		struct vm_struct *area;

		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	if ((__force void *)addr > high_memory &&
		(unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);
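
/*
 * Typical (hypothetical) use of the two exports above, assuming a device
 * with a 32-bit status register at physical address 0x80000000:
 *
 *	void __iomem *regs = ioremap(0x80000000, 0x1000);
 *
 *	if (regs) {
 *		u32 status = readl(regs);
 *		...
 *		iounmap(regs);
 *	}
 */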
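/*
 * map_page() below installs a single kernel mapping. In the two-level
 * 32-bit layout used here, the upper 10 bits of the VA select the
 * first-level entry, the middle 10 bits select the pte, and the low
 * 12 bits are the offset within the 4 KiB page (10 + 10 + 12 = 32).
 */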
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	/* pg = pte_alloc_kernel(&init_mm, pd, va); */

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			_tlbie(va);
	}
	return err;
}

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/*
			 * On the MicroBlaze, no user access
			 * forces R/W kernel access.
			 */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
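
/*
 * Note that lowmem is mapped one small page at a time: with 4 KiB pages,
 * a 64 MiB lowmem region costs 16384 map_page() calls at boot.
 */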

/* is x a power of 2? */
#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
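/*
 * Example: is_power_of_2(0x1000) is 1 and is_power_of_2(0x1800) is 0;
 * x & (x - 1) clears the lowest set bit, which leaves zero only when
 * exactly one bit was set. A generic is_power_of_2() also exists in
 * <linux/log2.h>.
 */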

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if the PTE was found, zero otherwise. The pointer
 * at *ptep is unmodified if the PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/* Find the physical address for this virtual address. Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;
	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}
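
/*
 * Illustrative (hypothetical) caller: a driver that needs the physical
 * address behind a kernel virtual buffer could do
 *
 *	unsigned long phys = iopa((unsigned long)buf);
 *
 * though the dma_map_single()-style APIs are the portable way to obtain
 * DMA addresses.
 */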

__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
		unsigned long address)
{
	pte_t *pte;

	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}
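
/*
 * Before mem_init() the allocator above cannot use the page allocator,
 * so the fallback path takes a page from the early boot allocator and
 * clears it by hand; once mem_init() has run, a zeroed page comes
 * straight from __get_free_page() via __GFP_ZERO.
 */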
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses)
		BUG();

	map_page(address, phys, pgprot_val(flags));
}
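
/*
 * Illustrative use (the index name is hypothetical for this port): an
 * early console could claim a fixed virtual address for its register
 * page with something like
 *
 *	__set_fixmap(FIX_EARLYCON_MEM_BASE, uart_phys, PAGE_KERNEL);
 *
 * before the vmalloc/ioremap machinery is available.
 */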