/* arch/powerpc/mm/init-common.c */
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 * Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/string.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kup.h>

/*
 * KUEP (Kernel Userspace Execution Prevention) is enabled by default
 * only when CONFIG_PPC_KUEP is set; the "nosmep" command-line option
 * forces it off either way.
 */
static bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);

/* Early boot parameter handler for "nosmep"; always accepts (returns 0). */
static int __init parse_nosmep(char *p)
{
	disable_kuep = true;
	pr_warn("Disabling Kernel Userspace Execution Prevention\n");
	return 0;
}
early_param("nosmep", parse_nosmep);

/*
 * Boot-time entry point for Kernel Userspace Protection setup:
 * forwards the command-line override to the platform's setup_kuep().
 */
void __init setup_kup(void)
{
	setup_kuep(disable_kuep);
}

/*
 * Generate one slab constructor per supported pagetable index size:
 * ctor_<shift> zero-fills a table of (sizeof(void *) << shift) bytes,
 * so freshly allocated objects come back as empty pagetables.
 */
#define PGT_CTOR(shift)					\
static void ctor_##shift(void *table)			\
{							\
	memset(table, 0, sizeof(void *) << (shift));	\
}

PGT_CTOR(0);  PGT_CTOR(1);  PGT_CTOR(2);  PGT_CTOR(3);
PGT_CTOR(4);  PGT_CTOR(5);  PGT_CTOR(6);  PGT_CTOR(7);
PGT_CTOR(8);  PGT_CTOR(9);  PGT_CTOR(10); PGT_CTOR(11);
PGT_CTOR(12); PGT_CTOR(13); PGT_CTOR(14); PGT_CTOR(15);
9b081e10 51
1e03c7e2 52static inline void (*ctor(int shift))(void *)
9b081e10 53{
1e03c7e2
CL
54 BUILD_BUG_ON(MAX_PGTABLE_INDEX_SIZE != 15);
55
56 switch (shift) {
57 case 0: return ctor_0;
58 case 1: return ctor_1;
59 case 2: return ctor_2;
60 case 3: return ctor_3;
61 case 4: return ctor_4;
62 case 5: return ctor_5;
63 case 6: return ctor_6;
64 case 7: return ctor_7;
65 case 8: return ctor_8;
66 case 9: return ctor_9;
67 case 10: return ctor_10;
68 case 11: return ctor_11;
69 case 12: return ctor_12;
70 case 13: return ctor_13;
71 case 14: return ctor_14;
72 case 15: return ctor_15;
73 }
74 return NULL;
9b081e10
CL
75}
76
/* Per-index-size pagetable caches, populated lazily by pgtable_cache_add(). */
struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE + 1];
EXPORT_SYMBOL_GPL(pgtable_cache);	/* used by kvm_hv module */
9b081e10
CL
79
80/*
81 * Create a kmem_cache() for pagetables. This is not used for PTE
82 * pages - they're linked to struct page, come from the normal free
83 * pages pool and have a different entry size (see real_pte_t) to
84 * everything else. Caches created by this function are used for all
85 * the higher level pagetables, and for hugepage pagetables.
86 */
1e03c7e2 87void pgtable_cache_add(unsigned int shift)
9b081e10
CL
88{
89 char *name;
90 unsigned long table_size = sizeof(void *) << shift;
91 unsigned long align = table_size;
92
93 /* When batching pgtable pointers for RCU freeing, we store
94 * the index size in the low bits. Table alignment must be
95 * big enough to fit it.
96 *
97 * Likewise, hugeapge pagetable pointers contain a (different)
98 * shift value in the low bits. All tables must be aligned so
99 * as to leave enough 0 bits in the address to contain it. */
100 unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
101 HUGEPD_SHIFT_MASK + 1);
102 struct kmem_cache *new;
103
104 /* It would be nice if this was a BUILD_BUG_ON(), but at the
105 * moment, gcc doesn't seem to recognize is_power_of_2 as a
106 * constant expression, so so much for that. */
107 BUG_ON(!is_power_of_2(minalign));
129dd323 108 BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
9b081e10
CL
109
110 if (PGT_CACHE(shift))
111 return; /* Already have a cache of this size */
112
113 align = max_t(unsigned long, align, minalign);
114 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
1e03c7e2 115 new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
bf5ca68d
NP
116 if (!new)
117 panic("Could not allocate pgtable cache for order %d", shift);
118
9b081e10 119 kfree(name);
129dd323 120 pgtable_cache[shift] = new;
bf5ca68d 121
9b081e10
CL
122 pr_debug("Allocated pgtable cache for order %d\n", shift);
123}
ba9b399a 124EXPORT_SYMBOL_GPL(pgtable_cache_add); /* used by kvm_hv module */
9b081e10
CL
125
/*
 * Set up every pagetable cache the current configuration needs.
 * pgtable_cache_add() is idempotent, so indexes that share a size
 * (e.g. PMD matching PGD) are only created once.
 */
void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE);

	if (PMD_CACHE_INDEX)
		pgtable_cache_add(PMD_CACHE_INDEX);
	/*
	 * In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index except with THP enabled
	 * on book3s 64
	 */
	if (PUD_CACHE_INDEX)
		pgtable_cache_add(PUD_CACHE_INDEX);
}