mm/sparsemem: fix 'mem_section' will never be NULL gcc 12 warning
[mirror_ubuntu-focal-kernel.git] mm/memblock.c
diff --git a/mm/memblock.c b/mm/memblock.c
index 7d4f61ae666a61123bea4b59b9b02f90ba401f3a..a75cc65f033078336f938a3c1442375f95782cad 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -164,6 +164,8 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
 {
        unsigned long i;
 
+       memblock_cap_size(base, &size);
+
        for (i = 0; i < type->cnt; i++)
                if (memblock_addrs_overlap(base, size, type->regions[i].base,
                                           type->regions[i].size))
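
The added memblock_cap_size() call clamps @size so that @base + @size cannot overflow phys_addr_t before the per-region overlap tests run; doing the capping inside memblock_overlaps_region() itself lets callers drop their own copies of it (see the memblock_is_region_reserved() hunk at the end of this diff). For reference, a sketch of the helper as it is defined earlier in mm/memblock.c (not shown in this hunk):

/*
 * Adjust *@size so that (@base + *@size) does not overflow phys_addr_t;
 * returns the new size.
 */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}
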
@@ -257,14 +259,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  *
  * Find @size free area aligned to @align in the specified range and node.
  *
- * When allocation direction is bottom-up, the @start should be greater
- * than the end of the kernel image. Otherwise, it will be trimmed. The
- * reason is that we want the bottom-up allocation just near the kernel
- * image so it is highly likely that the allocated memory and the kernel
- * will reside in the same node.
- *
- * If bottom-up allocation failed, will try to allocate memory top-down.
- *
  * Return:
  * Found address on success, 0 on failure.
  */
@@ -273,8 +267,6 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                                        phys_addr_t end, int nid,
                                        enum memblock_flags flags)
 {
-       phys_addr_t kernel_end, ret;
-
        /* pump up @end */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
            end == MEMBLOCK_ALLOC_KASAN)
@@ -283,40 +275,13 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
        /* avoid allocating the first page */
        start = max_t(phys_addr_t, start, PAGE_SIZE);
        end = max(start, end);
-       kernel_end = __pa_symbol(_end);
 
-       /*
-        * try bottom-up allocation only when bottom-up mode
-        * is set and @end is above the kernel image.
-        */
-       if (memblock_bottom_up() && end > kernel_end) {
-               phys_addr_t bottom_up_start;
-
-               /* make sure we will allocate above the kernel */
-               bottom_up_start = max(start, kernel_end);
-
-               /* ok, try bottom-up allocation first */
-               ret = __memblock_find_range_bottom_up(bottom_up_start, end,
-                                                     size, align, nid, flags);
-               if (ret)
-                       return ret;
-
-               /*
-                * we always limit bottom-up allocation above the kernel,
-                * but top-down allocation doesn't have the limit, so
-                * retrying top-down allocation may succeed when bottom-up
-                * allocation failed.
-                *
-                * bottom-up allocation is expected to be fail very rarely,
-                * so we use WARN_ONCE() here to see the stack trace if
-                * fail happens.
-                */
-               WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
-                         "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
-       }
-
-       return __memblock_find_range_top_down(start, end, size, align, nid,
-                                             flags);
+       if (memblock_bottom_up())
+               return __memblock_find_range_bottom_up(start, end, size, align,
+                                                      nid, flags);
+       else
+               return __memblock_find_range_top_down(start, end, size, align,
+                                                     nid, flags);
 }
 
 /**
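The hunks above drop the heuristic that forced bottom-up allocations to start above the kernel image, along with the top-down retry and its WARN_ONCE() fallback. memblock_find_in_range_node() now simply dispatches on memblock_bottom_up() and honours the caller's [start, end) window in both directions. A usage sketch of the simplified behaviour; the helper name alloc_low_buffer() is hypothetical:

#include <linux/memblock.h>
#include <linux/sizes.h>

/*
 * Hypothetical early-boot caller: with this change, bottom-up mode
 * scans candidates lowest-first across the whole requested window,
 * so the result may legitimately land below _end.
 */
static phys_addr_t __init alloc_low_buffer(void)
{
	phys_addr_t addr;

	memblock_set_bottom_up(true);
	addr = memblock_phys_alloc(SZ_1M, SZ_2M);	/* 1 MiB, 2 MiB aligned */
	memblock_set_bottom_up(false);

	return addr;
}
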
@@ -383,14 +348,20 @@ void __init memblock_discard(void)
                addr = __pa(memblock.reserved.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.reserved.max);
-               __memblock_free_late(addr, size);
+               if (memblock_reserved_in_slab)
+                       kfree(memblock.reserved.regions);
+               else
+                       __memblock_free_late(addr, size);
        }
 
        if (memblock.memory.regions != memblock_memory_init_regions) {
                addr = __pa(memblock.memory.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.memory.max);
-               __memblock_free_late(addr, size);
+               if (memblock_memory_in_slab)
+                       kfree(memblock.memory.regions);
+               else
+                       __memblock_free_late(addr, size);
        }
 }
 #endif
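
The memblock_{reserved,memory}_in_slab flags consulted above are set by memblock_double_array(), which reallocates a region array with kmalloc() once the slab allocator is available; freeing a slab-backed array with __memblock_free_late() would be wrong, hence the kfree() branches. An abridged sketch of the counterpart logic in memblock_double_array() (error handling and the copy of the old array are elided):

struct memblock_region *new_array;
phys_addr_t addr;
int use_slab = slab_is_available();

if (use_slab) {
	/* the region array lives in slab memory from now on */
	new_array = kmalloc(new_size, GFP_KERNEL);
	addr = new_array ? __pa(new_array) : 0;
} else {
	/* carve the new array out of memblock-managed memory */
	addr = memblock_find_in_range(0, memblock.current_limit,
				      new_size, PAGE_SIZE);
	new_array = addr ? __va(addr) : NULL;
}

*in_slab = use_slab;	/* consulted later by memblock_discard() */
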
@@ -1356,9 +1327,6 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                align = SMP_CACHE_BYTES;
        }
 
-       if (end > memblock.current_limit)
-               end = memblock.current_limit;
-
 again:
        found = memblock_find_in_range_node(size, align, start, end, nid,
                                            flags);
@@ -1469,6 +1437,9 @@ static void * __init memblock_alloc_internal(
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, nid);
 
+       if (max_addr > memblock.current_limit)
+               max_addr = memblock.current_limit;
+
        alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
 
        /* retry allocation without lower limit */
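
These two hunks move the memblock.current_limit clamp out of memblock_alloc_range_nid() and into memblock_alloc_internal(). The limit therefore still bounds the generic virtual allocators (memblock_alloc() and friends), while callers that pass an explicit physical window, such as memblock_phys_alloc_range(), now get exactly the range they asked for. A sketch of the resulting behaviour, assuming a platform with memory above 4 GiB; the addresses are hypothetical:

/*
 * Explicit window: honoured as given after this patch, even when it
 * lies entirely above memblock.current_limit.
 */
phys_addr_t pa = memblock_phys_alloc_range(SZ_64K, SZ_4K,
					   SZ_4G, SZ_4G + SZ_1G);

/*
 * Generic allocation: still clamped to memblock.current_limit, now
 * via the check added in memblock_alloc_internal() above.
 */
void *buf = memblock_alloc(SZ_64K, SZ_4K);
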
@@ -1797,7 +1768,6 @@ bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t siz
  */
 bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
-       memblock_cap_size(base, &size);
        return memblock_overlaps_region(&memblock.reserved, base, size);
 }
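
With the capping centralized in memblock_overlaps_region() (first hunk of this diff), the local memblock_cap_size() call here was redundant and is dropped; the semantics of the check are unchanged. Caller-side usage stays the same, e.g.:

/*
 * Unchanged behaviour for callers: an over-long @size is still capped
 * before the overlap test, just inside memblock_overlaps_region() now.
 */
if (memblock_is_region_reserved(base, size))
	pr_warn("memblock: %pa (+%pa) overlaps reserved memory\n",
		&base, &size);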