/*
 * ColdFire (m68k) MMU page-table allocation helpers.
 */
1 | #ifndef M68K_MCF_PGALLOC_H |
2 | #define M68K_MCF_PGALLOC_H | |
3 | ||
4 | #include <asm/tlb.h> | |
5 | #include <asm/tlbflush.h> | |
6 | ||
7 | extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | |
8 | { | |
9 | free_page((unsigned long) pte); | |
10 | } | |
11 | ||
12 | extern const char bad_pmd_string[]; | |
13 | ||
14 | extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | |
15 | unsigned long address) | |
16 | { | |
32d6bd90 | 17 | unsigned long page = __get_free_page(GFP_DMA); |
74d47992 GU |
18 | |
19 | if (!page) | |
20 | return NULL; | |
21 | ||
22 | memset((void *)page, 0, PAGE_SIZE); | |
23 | return (pte_t *) (page); | |
24 | } | |
25 | ||
26 | extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address) | |
27 | { | |
28 | return (pmd_t *) pgd; | |
29 | } | |
30 | ||
31 | #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) | |
32 | #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) | |
33 | ||
34 | #define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr) | |
35 | ||
/*
 * Install a pte table into a pmd entry.  The entry stores the kernel
 * virtual address of the table: for a struct page we translate via
 * page_address(); for a kernel pte pointer it is stored directly.
 */
#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
	(unsigned long)(page_address(page)))

#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))

/* Recover the struct page backing the pte table held in a pmd entry. */
#define pmd_pgtable(pmd) pmd_page(pmd)
42 | ||
43 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, | |
44 | unsigned long address) | |
45 | { | |
46 | __free_page(page); | |
47 | } | |
48 | ||
49 | #define __pmd_free_tlb(tlb, pmd, address) do { } while (0) | |
50 | ||
51 | static inline struct page *pte_alloc_one(struct mm_struct *mm, | |
52 | unsigned long address) | |
53 | { | |
32d6bd90 | 54 | struct page *page = alloc_pages(GFP_DMA, 0); |
74d47992 GU |
55 | pte_t *pte; |
56 | ||
57 | if (!page) | |
58 | return NULL; | |
f84c914b KS |
59 | if (!pgtable_page_ctor(page)) { |
60 | __free_page(page); | |
61 | return NULL; | |
62 | } | |
74d47992 GU |
63 | |
64 | pte = kmap(page); | |
65 | if (pte) { | |
66 | clear_page(pte); | |
67 | __flush_page_to_ram(pte); | |
68 | flush_tlb_kernel_page(pte); | |
69 | nocache_page(pte); | |
70 | } | |
71 | kunmap(page); | |
72 | ||
73 | return page; | |
74 | } | |
75 | ||
/*
 * Free a user pte table page allocated by pte_alloc_one().
 *
 * pte_alloc_one() runs pgtable_page_ctor() on the page, so the
 * matching pgtable_page_dtor() must be called here before freeing;
 * skipping it leaks page-table accounting state.
 *
 * "static inline" replaces the deprecated "extern inline" (ambiguous
 * linkage between gnu89 and C99 modes).
 */
static inline void pte_free(struct mm_struct *mm, struct page *page)
{
	pgtable_page_dtor(page);
	__free_page(page);
}

/*
 * In our implementation, each pgd entry contains 1 pmd that is never allocated
 * or freed.  pgd_present is always 1, so this should never be called. -NL
 */
#define pmd_free(mm, pmd) BUG()
86 | ||
/* Free a pgd allocated by pgd_alloc(); it occupies exactly one page. */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}
91 | ||
/*
 * Allocate a new pgd for a process.
 *
 * The whole page is first copied from swapper_pg_dir so the kernel
 * mappings (above PAGE_OFFSET) are shared, then the user portion
 * (entries below PAGE_OFFSET) is cleared.  Order matters: the memset
 * must follow the memcpy.  Returns NULL on allocation failure.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd;

	/* GFP_DMA: ColdFire page tables must be DMA-zone pages. */
	new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
	if (!new_pgd)
		return NULL;
	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
	memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
	return new_pgd;
}
103 | ||
/* pmds are folded into the pgd, so populating a pgd entry is a bug. */
#define pgd_populate(mm, pmd, pte) BUG()
105 | ||
106 | #endif /* M68K_MCF_PGALLOC_H */ |