/*
 * OpenRISC ioremap.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/memblock.h>	/* memblock_alloc(), used by pte_alloc_one_kernel() */
#include <linux/sched.h>
#include <asm/pgalloc.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/bug.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

extern int mem_init_done;

static unsigned int fixmaps_used __initdata;

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
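/*
 * Worked example (illustrative values, not from the original source),
 * assuming OpenRISC's 8 KiB pages (PAGE_SHIFT = 13): a request for
 * addr = 0x90000123, size = 0x10 gives
 *
 *	offset    = 0x123;		addr & ~PAGE_MASK
 *	p         = 0x90000000;		addr & PAGE_MASK
 *	last_addr = 0x90000132;		addr + size - 1
 *	size      = 0x2000;		PAGE_ALIGN(last_addr + 1) - p
 *
 * so one whole page is mapped and the caller gets back the mapped base
 * plus 0x123, making the unaligned request transparent to the caller.
 */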
void __iomem *__ref
__ioremap(phys_addr_t addr, unsigned long size, pgprot_t prot)
{
	phys_addr_t p;
	unsigned long v;
	unsigned long offset, last_addr;
	struct vm_struct *area = NULL;

	/* Don't allow wraparound or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = addr & ~PAGE_MASK;
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - p;

	if (likely(mem_init_done)) {
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;
		v = (unsigned long)area->addr;
	} else {
		if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
			return NULL;
		v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
		fixmaps_used += (size >> PAGE_SHIFT);
	}

	if (ioremap_page_range(v, v + size, p, prot)) {
		if (likely(mem_init_done))
			vfree(area->addr);
		else
			fixmaps_used -= (size >> PAGE_SHIFT);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)v);
}
EXPORT_SYMBOL(__ioremap);

void iounmap(void *addr)
{
	/* If the page is from the fixmap pool then we just clear out
	 * the fixmap mapping.
	 */
	if (unlikely((unsigned long)addr > FIXADDR_START)) {
		/* This is a bit broken... we don't really know
		 * how big the area is so it's difficult to know
		 * how many fixed pages to invalidate...
		 * just flush tlb and hope for the best...
		 * consider this a FIXME
		 *
		 * Really we should be clearing out one or more page
		 * table entries for these virtual addresses so that
		 * future references cause a page fault... for now, we
		 * rely on two things:
		 *   i)  this code never gets called on known boards
		 *   ii) invalid accesses to the freed areas aren't made
		 */
		flush_tlb_all();
		return;
	}

	return vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);

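/*
 * Usage sketch (illustrative, not part of the original file): drivers
 * normally reach __ioremap() through the ioremap() wrapper in
 * <asm/io.h> rather than calling it directly. The device base address
 * and register offset below are hypothetical.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(0x92000000, 0x1000);	hypothetical device base
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);			hypothetical control register
 *	...
 *	iounmap(regs);
 *
 * A mapping created before mem_init_done comes from the fixmap pool
 * and, per the FIXME above, is only crudely torn down by iounmap().
 */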
/*
 * OK, this one's a bit tricky... ioremap can get called before memory is
 * initialized (early serial console does this) and will want to alloc a page
 * for its mapping. No userspace pages will ever get allocated before memory
 * is initialized so this applies only to kernel pages. In the event that
 * this is called before memory is initialized we allocate the page using
 * the memblock infrastructure.
 */

pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm,
				  unsigned long address)
{
	pte_t *pte;

	if (likely(mem_init_done)) {
		pte = (pte_t *) __get_free_page(GFP_KERNEL);
	} else {
		/* Boot-time path: the page allocator isn't up yet, so
		 * take the page straight from memblock instead. */
		pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
	}

	if (pte)
		clear_page(pte);
	return pte;
}
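
/*
 * Illustrative early-boot call chain (a sketch inferred from the comment
 * above, not code from this file): an early console driver ioremaps its
 * registers before mem_init(), __ioremap() takes the fixmap branch, and
 * populating the page tables for that mapping lands here via the generic
 * pte_alloc_kernel() path, e.g.
 *
 *	ioremap(uart_phys, PAGE_SIZE)
 *	  -> __ioremap()			fixmap branch, !mem_init_done
 *	    -> ioremap_page_range()
 *	      -> pte_alloc_kernel()
 *	        -> pte_alloc_one_kernel()	memblock branch above
 *
 * where uart_phys stands for a hypothetical device address.
 */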