unsigned htab_shift_for_mem_size(unsigned long mem_size);
-#endif /* __ASSEMBLY__ */
+enum slb_index {
+ LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */
+ KSTACK_INDEX = 1, /* Kernel stack map */
+};
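+
+/* Pick the ESID mask matching the segment size (256M or 1T) */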
+#define slb_esid_mask(ssize) \
+ (((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)
+
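+/*
+ * Build the ESID (first) word of an SLB entry: the effective segment
+ * id, the valid bit, and the SLB slot index.
+ */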
+static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
+ enum slb_index index)
+{
+ return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
+}
+
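+/*
+ * Build the VSID (second) word of an SLB entry: the shifted VSID,
+ * protection/page-size flags, and the segment size encoding.
+ */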
+static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
+ unsigned long flags)
+{
+ return (vsid << slb_vsid_shift(ssize)) | flags |
+ ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
+}
+
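+/* As above, but derive the VSID from a kernel effective address */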
+static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+ unsigned long flags)
+{
+ return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
+}
+
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */
#include "internal.h"
-enum slb_index {
- LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */
- KSTACK_INDEX = 1, /* Kernel stack map */
-};
-
static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
-#define slb_esid_mask(ssize) \
- (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
-
-static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
- enum slb_index index)
-{
- return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
-}
-
-static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
- unsigned long flags)
-{
- return (vsid << slb_vsid_shift(ssize)) | flags |
- ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
-}
-
-static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
- unsigned long flags)
-{
- return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
-}
-
bool stress_slb_enabled __initdata;
static int __init parse_stress_slb(char *p)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/mmu.h>
+
+/* Insert two SLB entries for the same effective address */
+static void insert_slb_entry(unsigned long p, int ssize, int page_size)
+{
+ unsigned long flags;
+
+ flags = SLB_VSID_KERNEL | mmu_psize_defs[page_size].sllp;
+ preempt_disable();
+
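+	/*
+	 * Insert two entries translating the same effective address into
+	 * consecutive non-bolted slots; a later access to this segment
+	 * hits both, raising an SLB multihit machine check.
+	 */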
+ asm volatile("slbmte %0,%1" :
+ : "r" (mk_vsid_data(p, ssize, flags)),
+ "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED))
+ : "memory");
+
+ asm volatile("slbmte %0,%1" :
+ : "r" (mk_vsid_data(p, ssize, flags)),
+ "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED + 1))
+ : "memory");
+ preempt_enable();
+}
+
+/* Inject an SLB multihit on a vmalloc-ed address, i.e. 0xD00... */
+static int inject_vmalloc_slb_multihit(void)
+{
+ char *p;
+
+ p = vmalloc(PAGE_SIZE);
+ if (!p)
+ return -ENOMEM;
+
+ insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_vmalloc_psize);
+	/*
+	 * This triggers an exception; if it is handled correctly we
+	 * must recover from the error.
+	 */
+ p[0] = '!';
+ vfree(p);
+ return 0;
+}
+
+/* Inject an SLB multihit on a kmalloc-ed address, i.e. 0xC00... */
+static int inject_kmalloc_slb_multihit(void)
+{
+ char *p;
+
+ p = kmalloc(2048, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_linear_psize);
+	/*
+	 * This triggers an exception; if it is handled correctly we
+	 * must recover from the error.
+	 */
+ p[0] = '!';
+ kfree(p);
+ return 0;
+}
+
+/*
+ * A few initial SLB entries are bolted. This test injects a
+ * multihit into bolted entry 0.
+ */
+static void insert_dup_slb_entry_0(void)
+{
+ unsigned long test_address = PAGE_OFFSET, *test_ptr;
+ unsigned long esid, vsid;
+ unsigned long i = 0;
+
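+	/* PAGE_OFFSET is covered by the bolted linear-map entry (index 0) */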
+ test_ptr = (unsigned long *)test_address;
+ preempt_disable();
+
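+	/* Read back the ESID/VSID words of bolted entry 0 */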
+ asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
+ asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
+
+	/* For i != 0 we would need to mask out the old entry number */
+ asm volatile("slbmte %0,%1" :
+ : "r" (vsid),
+ "r" (esid | SLB_NUM_BOLTED)
+ : "memory");
+
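+	/* Re-read entry 0 and insert a second duplicate in the next slot */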
+ asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
+ asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
+
+	/* For i != 0 we would need to mask out the old entry number */
+ asm volatile("slbmte %0,%1" :
+ : "r" (vsid),
+ "r" (esid | (SLB_NUM_BOLTED + 1))
+ : "memory");
+
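+	/* Accessing the linear map now hits the duplicated entries */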
+ pr_info("%s accessing test address 0x%lx: 0x%lx\n",
+ __func__, test_address, *test_ptr);
+
+ preempt_enable();
+}
+
+void lkdtm_PPC_SLB_MULTIHIT(void)
+{
+ if (!radix_enabled()) {
+ pr_info("Injecting SLB multihit errors\n");
+		/*
+		 * These need not be separate tests; they do pretty much
+		 * the same thing. In any case we must recover from the
+		 * errors these functions introduce: the machine would not
+		 * survive these tests if they were mishandled.
+		 */
+ inject_vmalloc_slb_multihit();
+ inject_kmalloc_slb_multihit();
+ insert_dup_slb_entry_0();
+ pr_info("Recovered from SLB multihit errors\n");
+ } else {
+		pr_err("XFAIL: This test is for ppc64 with hash MMU only\n");
+ }
+}