git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
s390/mem_detect: move tprot loop to early boot phase
author Vasily Gorbik <gor@linux.ibm.com>
Wed, 11 Apr 2018 09:56:55 +0000 (11:56 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Tue, 9 Oct 2018 09:21:08 +0000 (11:21 +0200)
Move memory detection to the early boot phase. To store online memory
regions, "struct mem_detect_info" has been introduced together with the
for_each_mem_detect_block iterator. The mem_detect_info contents are
later converted to memblock.

Also introduce the sclp_early_get_meminfo function, which returns the
maximum physical memory size and the memory increment size (rzm).

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/boot/Makefile
arch/s390/boot/boot.h
arch/s390/boot/mem_detect.c [new file with mode: 0644]
arch/s390/boot/startup.c
arch/s390/include/asm/mem_detect.h [new file with mode: 0644]
arch/s390/include/asm/sclp.h
arch/s390/include/asm/setup.h
arch/s390/kernel/setup.c
arch/s390/mm/Makefile
arch/s390/mm/mem_detect.c [deleted file]
drivers/s390/char/sclp_early_core.c
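
For a guest with, say, 4 GB of contiguous online storage, the tprot loop below would be expected to leave a single entry behind (mem_detect.count == 1, entries[0] spanning [0, 4 GB), info_source == MEM_DETECT_TPROT_LOOP), which setup_arch() then converts into memblock ranges; the numbers here are invented purely for illustration.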

index 1b5a95b1ab09b8994dab055871490c2044cabed1..5e2cec6e4b3e58c7b3c03ee947ca8d2a6aeeb476 100644 (file)
@@ -27,7 +27,7 @@ endif
 
 CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
 
-obj-y  := head.o als.o startup.o ebcdic.o sclp_early_core.o mem.o
+obj-y  := head.o als.o startup.o mem_detect.o ebcdic.o sclp_early_core.o mem.o
 targets        := bzImage startup.a $(obj-y)
 subdir-        := compressed
 
index 36c93e6cbc3f46554edbaad332be56282b95b34a..808154b99a5d6933e864efea9def1d0af3e3aa5a 100644 (file)
@@ -3,5 +3,6 @@
 #define BOOT_BOOT_H
 
 void startup_kernel(void);
+void detect_memory(void);
 
 #endif /* BOOT_BOOT_H */
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
new file mode 100644 (file)
index 0000000..920e6fe
--- /dev/null
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <asm/sclp.h>
+#include <asm/sections.h>
+#include <asm/mem_detect.h>
+#include "compressed/decompressor.h"
+#include "boot.h"
+
+#define CHUNK_READ_WRITE 0
+#define CHUNK_READ_ONLY  1
+
+unsigned long __bootdata(max_physmem_end);
+struct mem_detect_info __bootdata(mem_detect);
+
+/* up to 256 storage elements, 1020 subincrements each */
+#define ENTRIES_EXTENDED_MAX                                                  \
+       (256 * (1020 / 2) * sizeof(struct mem_detect_block))
+
+/*
+ * To avoid corrupting old kernel memory during dump, find the lowest memory
+ * chunk possible, either right after the kernel end (decompressed kernel) or
+ * after the initrd (if it is present and there is no hole between the kernel
+ * end and the initrd).
+ */
+static void *mem_detect_alloc_extended(void)
+{
+       unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
+
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
+           INITRD_START < offset + ENTRIES_EXTENDED_MAX)
+               offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
+
+       return (void *)offset;
+}
+
+static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
+{
+       if (n < MEM_INLINED_ENTRIES)
+               return &mem_detect.entries[n];
+       if (unlikely(!mem_detect.entries_extended))
+               mem_detect.entries_extended = mem_detect_alloc_extended();
+       return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
+}
+
+/*
+ * Sequential calls to add_mem_detect_block with adjacent memory areas
+ * are merged together into a single memory block.
+ */
+void add_mem_detect_block(u64 start, u64 end)
+{
+       struct mem_detect_block *block;
+
+       if (mem_detect.count) {
+               block = __get_mem_detect_block_ptr(mem_detect.count - 1);
+               if (block->end == start) {
+                       block->end = end;
+                       return;
+               }
+       }
+
+       block = __get_mem_detect_block_ptr(mem_detect.count);
+       block->start = start;
+       block->end = end;
+       mem_detect.count++;
+}
+
+static unsigned long get_mem_detect_end(void)
+{
+       if (mem_detect.count)
+               return __get_mem_detect_block_ptr(mem_detect.count - 1)->end;
+       return 0;
+}
+
+static int tprot(unsigned long addr)
+{
+       unsigned long pgm_addr;
+       int rc = -EFAULT;
+       psw_t old = S390_lowcore.program_new_psw;
+
+       S390_lowcore.program_new_psw.mask = __extract_psw();
+       asm volatile(
+               "       larl    %[pgm_addr],1f\n"
+               "       stg     %[pgm_addr],%[psw_pgm_addr]\n"
+               "       tprot   0(%[addr]),0\n"
+               "       ipm     %[rc]\n"
+               "       srl     %[rc],28\n"
+               "1:\n"
+               : [pgm_addr] "=&d"(pgm_addr),
+                 [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
+                 [rc] "+&d"(rc)
+               : [addr] "a"(addr)
+               : "cc", "memory");
+       S390_lowcore.program_new_psw = old;
+       return rc;
+}
+
+static void scan_memory(unsigned long rzm)
+{
+       unsigned long addr, size;
+       int type;
+
+       if (!rzm)
+               rzm = 1UL << 20;
+
+       addr = 0;
+       do {
+               size = 0;
+               /* assume lowcore is writable */
+               type = addr ? tprot(addr) : CHUNK_READ_WRITE;
+               do {
+                       size += rzm;
+                       if (max_physmem_end && addr + size >= max_physmem_end)
+                               break;
+               } while (type == tprot(addr + size));
+               if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
+                       if (max_physmem_end && (addr + size > max_physmem_end))
+                               size = max_physmem_end - addr;
+                       add_mem_detect_block(addr, addr + size);
+               }
+               addr += size;
+       } while (addr < max_physmem_end);
+}
+
+void detect_memory(void)
+{
+       unsigned long rzm;
+
+       sclp_early_get_meminfo(&max_physmem_end, &rzm);
+       scan_memory(rzm);
+       mem_detect.info_source = MEM_DETECT_TPROT_LOOP;
+       if (!max_physmem_end)
+               max_physmem_end = get_mem_detect_end();
+}
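
The scan itself is a plain probing loop: classify each rzm-sized increment with tprot and merge consecutive increments that report the same access type into one block. Below is a rough userspace model of that strategy, not the kernel code itself; probe() is a made-up stand-in for the tprot instruction, and the simulated layout (two 16 MB online areas separated by a hole, with a 64 MB scan limit standing in for what SCLP would report) is invented for illustration.

#include <stdio.h>

#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY  1
#define CHUNK_NONE       2                           /* probe faulted: no storage there */

static const unsigned long rzm = 1UL << 20;          /* 1 MB increment size */
static const unsigned long scan_limit = 64UL << 20;  /* pretend SCLP-reported maximum */

/* stand-in for tprot: online storage at [0, 16 MB) and [32 MB, 48 MB) */
static int probe(unsigned long addr)
{
        if (addr < (16UL << 20))
                return CHUNK_READ_WRITE;
        if (addr >= (32UL << 20) && addr < (48UL << 20))
                return CHUNK_READ_WRITE;
        return CHUNK_NONE;
}

int main(void)
{
        unsigned long addr = 0, size;
        int type;

        do {
                size = 0;
                /* lowcore is assumed writable, exactly as in scan_memory() */
                type = addr ? probe(addr) : CHUNK_READ_WRITE;
                do {
                        size += rzm;
                        if (addr + size >= scan_limit)
                                break;
                } while (type == probe(addr + size));
                if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY)
                        printf("online block: [0x%lx-0x%lx)\n", addr, addr + size);
                addr += size;
        } while (addr < scan_limit);
        return 0;
}

Compiled and run, this prints the two merged blocks [0x0-0x1000000) and [0x2000000-0x3000000), mirroring what add_mem_detect_block() would record for such a layout.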
index 78651a2c26b0ee82ba490de12b8f3bf07e4ff301..b0e9f4619203848f3ab799bf2dff3407f16a3bac 100644 (file)
@@ -51,6 +51,7 @@ void startup_kernel(void)
 
        rescue_initrd();
        sclp_early_read_info();
+       detect_memory();
        if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
                img = decompress_kernel();
                memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
diff --git a/arch/s390/include/asm/mem_detect.h b/arch/s390/include/asm/mem_detect.h
new file mode 100644 (file)
index 0000000..8586ade
--- /dev/null
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_MEM_DETECT_H
+#define _ASM_S390_MEM_DETECT_H
+
+#include <linux/types.h>
+
+enum mem_info_source {
+       MEM_DETECT_NONE = 0,
+       MEM_DETECT_TPROT_LOOP
+};
+
+struct mem_detect_block {
+       u64 start;
+       u64 end;
+};
+
+/*
+ * Storage element id is defined as 1 byte (up to 256 storage elements).
+ * In practice only storage element ids 0 and 1 are used.
+ * According to the architecture one storage element could have as many as
+ * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
+ * If more mem_detect_blocks are required, a block of memory from an already
+ * known mem_detect_block is taken (entries_extended points to it).
+ */
+#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
+
+struct mem_detect_info {
+       u32 count;
+       u8 info_source;
+       struct mem_detect_block entries[MEM_INLINED_ENTRIES];
+       struct mem_detect_block *entries_extended;
+};
+extern struct mem_detect_info mem_detect;
+
+static inline int __get_mem_detect_block(u32 n, unsigned long *start,
+                                        unsigned long *end)
+{
+       if (n >= mem_detect.count) {
+               *start = 0;
+               *end = 0;
+               return -1;
+       }
+
+       if (n < MEM_INLINED_ENTRIES) {
+               *start = (unsigned long)mem_detect.entries[n].start;
+               *end = (unsigned long)mem_detect.entries[n].end;
+       } else {
+               *start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
+               *end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
+       }
+       return 0;
+}
+
+/**
+ * for_each_mem_detect_block - early online memory range iterator
+ * @i: an integer used as loop variable
+ * @p_start: ptr to unsigned long for start address of the range
+ * @p_end: ptr to unsigned long for end address of the range
+ *
+ * Walks over detected online memory ranges.
+ */
+#define for_each_mem_detect_block(i, p_start, p_end)                   \
+       for (i = 0, __get_mem_detect_block(i, p_start, p_end);          \
+            i < mem_detect.count;                                      \
+            i++, __get_mem_detect_block(i, p_start, p_end))
+
+static inline void get_mem_detect_reserved(unsigned long *start,
+                                          unsigned long *size)
+{
+       *start = (unsigned long)mem_detect.entries_extended;
+       if (mem_detect.count > MEM_INLINED_ENTRIES)
+               *size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
+       else
+               *size = 0;
+}
+
+#endif
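
for_each_mem_detect_block is meant to be the only way later code walks the detected ranges; the real consumer added by this patch is memblock_add_mem_detect_info() in the setup.c hunk below. A minimal sketch of a hypothetical extra consumer, assuming detect_memory() has already populated mem_detect (the helper name is invented):

#include <linux/init.h>
#include <asm/mem_detect.h>

/* hypothetical helper: total online memory found by the early tprot loop */
static unsigned long __init detected_memory_size(void)
{
        unsigned long start, end, total = 0;
        int i;

        for_each_mem_detect_block(i, &start, &end)
                total += end - start;
        return total;
}

Entries beyond the 255 inlined ones live in the entries_extended array. For example, with mem_detect.count == 300, get_mem_detect_reserved() would report (300 - 255) * sizeof(struct mem_detect_block) == 720 bytes, which setup_arch() keeps reserved until the data has been converted to memblock.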
index 7df57bd09aa1797ca2cd769be8e0278238e9954d..c21a8b637a1176b757364fbc3413ddb089624ff5 100644 (file)
@@ -113,6 +113,7 @@ void sclp_early_printk(const char *s);
 void sclp_early_printk_force(const char *s);
 void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
 
+int sclp_early_get_meminfo(unsigned long *mem, unsigned long *rzm);
 int _sclp_get_core_info(struct sclp_core_info *info);
 int sclp_core_configure(u8 core);
 int sclp_core_deconfigure(u8 core);
index 1d66016f417020ee99324db641fc813c56b0cfc5..522e4553373af588820359e925ffe092b9d38cde 100644 (file)
@@ -69,8 +69,6 @@ extern int memory_end_set;
 extern unsigned long memory_end;
 extern unsigned long max_physmem_end;
 
-extern void detect_memory_memblock(void);
-
 #define MACHINE_IS_VM          (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM         (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
 #define MACHINE_IS_LPAR                (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
index 67fa7cb8ae804f8bb3e86785da2e000b74fbe967..fdf9bd964deeb979c187d6d4461787687aa68a3d 100644 (file)
@@ -70,6 +70,7 @@
 #include <asm/numa.h>
 #include <asm/alternative.h>
 #include <asm/nospec-branch.h>
+#include <asm/mem_detect.h>
 #include "entry.h"
 
 /*
@@ -91,7 +92,8 @@ unsigned long int_hwcap = 0;
 
 int __initdata memory_end_set;
 unsigned long __initdata memory_end;
-unsigned long __initdata max_physmem_end;
+unsigned long __bootdata(max_physmem_end);
+struct mem_detect_info __bootdata(mem_detect);
 
 unsigned long VMALLOC_START;
 EXPORT_SYMBOL(VMALLOC_START);
@@ -720,6 +722,45 @@ static void __init reserve_initrd(void)
 #endif
 }
 
+static void __init reserve_mem_detect_info(void)
+{
+       unsigned long start, size;
+
+       get_mem_detect_reserved(&start, &size);
+       if (size)
+               memblock_reserve(start, size);
+}
+
+static void __init free_mem_detect_info(void)
+{
+       unsigned long start, size;
+
+       get_mem_detect_reserved(&start, &size);
+       if (size)
+               memblock_free(start, size);
+}
+
+static void __init memblock_physmem_add(phys_addr_t start, phys_addr_t size)
+{
+       memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
+                    start, start + size - 1);
+       memblock_add_range(&memblock.memory, start, size, 0, 0);
+       memblock_add_range(&memblock.physmem, start, size, 0, 0);
+}
+
+static void __init memblock_add_mem_detect_info(void)
+{
+       unsigned long start, end;
+       int i;
+
+       /* keep memblock lists close to the kernel */
+       memblock_set_bottom_up(true);
+       for_each_mem_detect_block(i, &start, &end)
+               memblock_physmem_add(start, end - start);
+       memblock_set_bottom_up(false);
+       memblock_dump_all();
+}
+
 /*
  * Check for initrd being in usable memory
  */
@@ -984,11 +1025,13 @@ void __init setup_arch(char **cmdline_p)
        reserve_oldmem();
        reserve_kernel();
        reserve_initrd();
+       reserve_mem_detect_info();
        memblock_allow_resize();
 
        /* Get information about *all* installed memory */
-       detect_memory_memblock();
+       memblock_add_mem_detect_info();
 
+       free_mem_detect_info();
        remove_oldmem();
 
        /*
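
Note the ordering in setup_arch(): reserve_mem_detect_info() is called before memblock_allow_resize() and memblock_add_mem_detect_info(), so a possible entries_extended area cannot be reused by early memblock allocations (for instance when the memblock arrays are resized) while the ranges are still being read, and free_mem_detect_info() gives that memory back only once the conversion to memblock is done.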
index 33fe418506bc7743606a7bf2299e8a715741c09c..83c83c69cab220a5f0e10acc30fdd8f306cbdcd1 100644 (file)
@@ -4,8 +4,7 @@
 #
 
 obj-y          := init.o fault.o extmem.o mmap.o vmem.o maccess.o
-obj-y          += page-states.o gup.o pageattr.o mem_detect.o
-obj-y          += pgtable.o pgalloc.o
+obj-y          += page-states.o gup.o pageattr.o pgtable.o pgalloc.o
 
 obj-$(CONFIG_CMM)              += cmm.o
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
deleted file mode 100644 (file)
index 21f6c82..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright IBM Corp. 2008, 2009
- *
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/memblock.h>
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <asm/ipl.h>
-#include <asm/sclp.h>
-#include <asm/setup.h>
-
-#define CHUNK_READ_WRITE 0
-#define CHUNK_READ_ONLY  1
-
-static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
-{
-       memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
-                    start, start + size - 1);
-       memblock_add_range(&memblock.memory, start, size, 0, 0);
-       memblock_add_range(&memblock.physmem, start, size, 0, 0);
-}
-
-void __init detect_memory_memblock(void)
-{
-       unsigned long memsize, rnmax, rzm, addr, size;
-       int type;
-
-       rzm = sclp.rzm;
-       rnmax = sclp.rnmax;
-       memsize = rzm * rnmax;
-       if (!rzm)
-               rzm = 1UL << 17;
-       max_physmem_end = memsize;
-       addr = 0;
-       /* keep memblock lists close to the kernel */
-       memblock_set_bottom_up(true);
-       do {
-               size = 0;
-               /* assume lowcore is writable */
-               type = addr ? tprot(addr) : CHUNK_READ_WRITE;
-               do {
-                       size += rzm;
-                       if (max_physmem_end && addr + size >= max_physmem_end)
-                               break;
-               } while (type == tprot(addr + size));
-               if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
-                       if (max_physmem_end && (addr + size > max_physmem_end))
-                               size = max_physmem_end - addr;
-                       memblock_physmem_add(addr, size);
-               }
-               addr += size;
-       } while (addr < max_physmem_end);
-       memblock_set_bottom_up(false);
-       if (!max_physmem_end)
-               max_physmem_end = memblock_end_of_DRAM();
-       memblock_dump_all();
-}
index bbea2154a8072499abb8b1af27b685d868a7a9a4..4f04ba6897715c0496f07d40dd79cd52e631e9f0 100644 (file)
@@ -270,3 +270,20 @@ int __init sclp_early_get_info(struct read_info_sccb *info)
        *info = sclp_info_sccb;
        return 0;
 }
+
+int __init sclp_early_get_meminfo(unsigned long *mem, unsigned long *rzm)
+{
+       unsigned long rnmax;
+       unsigned long rnsize;
+       struct read_info_sccb *sccb = &sclp_info_sccb;
+
+       if (!sclp_info_sccb_valid)
+               return -EIO;
+
+       rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+       rnsize = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
+       rnsize <<= 20;
+       *mem = rnsize * rnmax;
+       *rzm = rnsize;
+       return 0;
+}
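
As a worked example with invented values: an SCCB reporting rnsize == 1 (1 MB increments) and rnmax == 4096 yields *rzm == 0x100000 and *mem == 4096 * 0x100000 == 4 GB; detect_memory() then uses *mem as max_physmem_end and *rzm as the step size for the tprot scan.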