mm: rename page_order() to buddy_order()
author		Matthew Wilcox (Oracle) <willy@infradead.org>
		Fri, 16 Oct 2020 03:10:15 +0000 (20:10 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Fri, 16 Oct 2020 18:11:19 +0000 (11:11 -0700)

The current page_order() can only be called on pages in the buddy
allocator.  For compound pages, you have to use compound_order().  This is
confusing and led to a bug, so rename page_order() to buddy_order().
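
As an illustration of the distinction (not part of this patch): the two orders
live in different places, so the accessors cannot be mixed. The helper below is
hypothetical and only shows which accessor applies to which kind of page:

    static inline unsigned int order_of(struct page *page)
    {
            if (PageBuddy(page))                    /* free page in the buddy allocator */
                    return buddy_order(page);       /* order kept in page_private() */
            if (PageHead(page))                     /* head of a compound (e.g. THP) page */
                    return compound_order(page);    /* order kept in the first tail page */
            return 0;                               /* order-0 or indeterminate */
    }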

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Link: https://lkml.kernel.org/r/20201001152259.14932-2-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/compaction.c
mm/internal.h
mm/page_alloc.c
mm/page_isolation.c
mm/page_owner.c
mm/page_reporting.c
mm/shuffle.c

diff --git a/mm/compaction.c b/mm/compaction.c
index 6c63844fc06144b012eebbc915f28ffad7e7aabd..6e0ee5641788655ac0ce89a3bf6b1b46c5715f00 100644
@@ -625,7 +625,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                }
 
                /* Found a free page, will break it into order-0 pages */
-               order = page_order(page);
+               order = buddy_order(page);
                isolated = __isolate_free_page(page, order);
                if (!isolated)
                        break;
@@ -898,7 +898,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                 * potential isolation targets.
                 */
                if (PageBuddy(page)) {
-                       unsigned long freepage_order = page_order_unsafe(page);
+                       unsigned long freepage_order = buddy_order_unsafe(page);
 
                        /*
                         * Without lock, we cannot be sure that what we got is
@@ -1172,7 +1172,7 @@ static bool suitable_migration_target(struct compact_control *cc,
                 * the only small danger is that we skip a potentially suitable
                 * pageblock, so it's not worth to check order for valid range.
                 */
-               if (page_order_unsafe(page) >= pageblock_order)
+               if (buddy_order_unsafe(page) >= pageblock_order)
                        return false;
        }
 
diff --git a/mm/internal.h b/mm/internal.h
index 6345b08ce86ccfeed5d183006065532c082c8980..c43ccdddb0f6e92e712dfd2dafe144018e7a4440 100644
@@ -270,16 +270,16 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
  * page from being allocated in parallel and returning garbage as the order.
  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
  * page cannot be allocated or merged in parallel. Alternatively, it must
- * handle invalid values gracefully, and use page_order_unsafe() below.
+ * handle invalid values gracefully, and use buddy_order_unsafe() below.
  */
-static inline unsigned int page_order(struct page *page)
+static inline unsigned int buddy_order(struct page *page)
 {
        /* PageBuddy() must be checked by the caller */
        return page_private(page);
 }
 
 /*
- * Like page_order(), but for callers who cannot afford to hold the zone lock.
+ * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
  * PageBuddy() should be checked first by the caller to minimize race window,
  * and invalid values must be handled gracefully.
  *
@@ -289,7 +289,7 @@ static inline unsigned int page_order(struct page *page)
  * times, potentially observing different values in the tests and the actual
  * use of the result.
  */
-#define page_order_unsafe(page)                READ_ONCE(page_private(page))
+#define buddy_order_unsafe(page)       READ_ONCE(page_private(page))
 
 static inline bool is_cow_mapping(vm_flags_t flags)
 {
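
The comment in the hunk above spells out the locking contract: buddy_order() is
exact only under zone->lock (or an equivalent guarantee that the page cannot be
allocated or merged concurrently), while buddy_order_unsafe() may return a stale
or bogus value and must be range-checked before use. A hypothetical PFN-walker
sketch, not from this patch, showing both patterns:

    static unsigned long pages_to_skip(struct zone *zone, struct page *page)
    {
            unsigned long flags, nr = 1;

            /* Lockless: the order may be garbage, so bound-check it. */
            if (PageBuddy(page)) {
                    unsigned long order = buddy_order_unsafe(page);

                    if (order < MAX_ORDER)
                            nr = 1UL << order;
            }

            /* Locked: PageBuddy() and buddy_order() are stable here. */
            spin_lock_irqsave(&zone->lock, flags);
            if (PageBuddy(page))
                    nr = 1UL << buddy_order(page);
            spin_unlock_irqrestore(&zone->lock, flags);

            return nr;
    }
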
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3b032dac62e62e3bc86a34ab5c1436e01dc7da31..ccf615c0627e1d34845f116ab12457c00a4fcb00 100644
@@ -792,7 +792,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype) {}
 #endif
 
-static inline void set_page_order(struct page *page, unsigned int order)
+static inline void set_buddy_order(struct page *page, unsigned int order)
 {
        set_page_private(page, order);
        __SetPageBuddy(page);
@@ -817,7 +817,7 @@ static inline bool page_is_buddy(struct page *page, struct page *buddy,
        if (!page_is_guard(buddy) && !PageBuddy(buddy))
                return false;
 
-       if (page_order(buddy) != order)
+       if (buddy_order(buddy) != order)
                return false;
 
        /*
@@ -1059,7 +1059,7 @@ continue_merging:
        }
 
 done_merging:
-       set_page_order(page, order);
+       set_buddy_order(page, order);
 
        if (fpi_flags & FPI_TO_TAIL)
                to_tail = true;
@@ -2178,7 +2178,7 @@ static inline void expand(struct zone *zone, struct page *page,
                        continue;
 
                add_to_free_list(&page[size], zone, high, migratetype);
-               set_page_order(&page[size], high);
+               set_buddy_order(&page[size], high);
        }
 }
 
@@ -2392,7 +2392,7 @@ static int move_freepages(struct zone *zone,
                VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
                VM_BUG_ON_PAGE(page_zone(page) != zone, page);
 
-               order = page_order(page);
+               order = buddy_order(page);
                move_to_free_list(page, zone, order, migratetype);
                page += 1 << order;
                pages_moved += 1 << order;
@@ -2516,7 +2516,7 @@ static inline void boost_watermark(struct zone *zone)
 static void steal_suitable_fallback(struct zone *zone, struct page *page,
                unsigned int alloc_flags, int start_type, bool whole_block)
 {
-       unsigned int current_order = page_order(page);
+       unsigned int current_order = buddy_order(page);
        int free_pages, movable_pages, alike_pages;
        int old_block_type;
 
@@ -8344,7 +8344,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                 */
                if (!page_ref_count(page)) {
                        if (PageBuddy(page))
-                               iter += (1 << page_order(page)) - 1;
+                               iter += (1 << buddy_order(page)) - 1;
                        continue;
                }
 
@@ -8557,7 +8557,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
        }
 
        if (outer_start != start) {
-               order = page_order(pfn_to_page(outer_start));
+               order = buddy_order(pfn_to_page(outer_start));
 
                /*
                 * outer_start page could be small order buddy page and
@@ -8782,7 +8782,7 @@ void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 
                BUG_ON(page_count(page));
                BUG_ON(!PageBuddy(page));
-               order = page_order(page);
+               order = buddy_order(page);
                del_page_from_free_list(page, zone, order);
                pfn += (1 << order);
        }
@@ -8801,7 +8801,7 @@ bool is_free_buddy_page(struct page *page)
        for (order = 0; order < MAX_ORDER; order++) {
                struct page *page_head = page - (pfn & ((1 << order) - 1));
 
-               if (PageBuddy(page_head) && page_order(page_head) >= order)
+               if (PageBuddy(page_head) && buddy_order(page_head) >= order)
                        break;
        }
        spin_unlock_irqrestore(&zone->lock, flags);
@@ -8838,7 +8838,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
 
                if (current_buddy != target) {
                        add_to_free_list(current_buddy, zone, high, migratetype);
-                       set_page_order(current_buddy, high);
+                       set_buddy_order(current_buddy, high);
                        page = next_page;
                }
        }
@@ -8858,16 +8858,16 @@ bool take_page_off_buddy(struct page *page)
        spin_lock_irqsave(&zone->lock, flags);
        for (order = 0; order < MAX_ORDER; order++) {
                struct page *page_head = page - (pfn & ((1 << order) - 1));
-               int buddy_order = page_order(page_head);
+               int page_order = buddy_order(page_head);
 
-               if (PageBuddy(page_head) && buddy_order >= order) {
+               if (PageBuddy(page_head) && page_order >= order) {
                        unsigned long pfn_head = page_to_pfn(page_head);
                        int migratetype = get_pfnblock_migratetype(page_head,
                                                                   pfn_head);
 
-                       del_page_from_free_list(page_head, zone, buddy_order);
+                       del_page_from_free_list(page_head, zone, page_order);
                        break_down_buddy_pages(zone, page_head, page, 0,
-                                               buddy_order, migratetype);
+                                               page_order, migratetype);
                        ret = true;
                        break;
                }
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 83692b93778465c97696077c966e82116a27b45c..abbf422144850977dfba2f657c63330e5d2da217 100644
@@ -88,7 +88,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
         * these pages to be merged.
         */
        if (PageBuddy(page)) {
-               order = page_order(page);
+               order = buddy_order(page);
                if (order >= pageblock_order) {
                        pfn = page_to_pfn(page);
                        buddy_pfn = __find_buddy_pfn(pfn, order);
@@ -261,7 +261,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                         * the correct MIGRATE_ISOLATE freelist. There is no
                         * simple way to verify that as VM_BUG_ON(), though.
                         */
-                       pfn += 1 << page_order(page);
+                       pfn += 1 << buddy_order(page);
                else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
                        /* A HWPoisoned page cannot be also PageBuddy */
                        pfn++;
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 4ca3051a10358639bf50aaa6763174038a356dfd..b735a8eafcdbc605bc7e4e226fcbaf27abbcd4cb 100644
@@ -295,7 +295,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                        if (PageBuddy(page)) {
                                unsigned long freepage_order;
 
-                               freepage_order = page_order_unsafe(page);
+                               freepage_order = buddy_order_unsafe(page);
                                if (freepage_order < MAX_ORDER)
                                        pfn += (1UL << freepage_order) - 1;
                                continue;
@@ -490,7 +490,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 
                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
-                       unsigned long freepage_order = page_order_unsafe(page);
+                       unsigned long freepage_order = buddy_order_unsafe(page);
 
                        if (freepage_order < MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
@@ -584,7 +584,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
                         * heavy lock contention.
                         */
                        if (PageBuddy(page)) {
-                               unsigned long order = page_order_unsafe(page);
+                               unsigned long order = buddy_order_unsafe(page);
 
                                if (order > 0 && order < MAX_ORDER)
                                        pfn += (1UL << order) - 1;
diff --git a/mm/page_reporting.c b/mm/page_reporting.c
index aaaa3605123de75a27fb85e663568ab7825acf60..cd8e13d41df43c8e9617cebbe9ca3357b5a27320 100644
@@ -92,7 +92,7 @@ page_reporting_drain(struct page_reporting_dev_info *prdev,
                 * report on the new larger page when we make our way
                 * up to that higher order.
                 */
-               if (PageBuddy(page) && page_order(page) == order)
+               if (PageBuddy(page) && buddy_order(page) == order)
                        __SetPageReported(page);
        } while ((sg = sg_next(sg)));
 
diff --git a/mm/shuffle.c b/mm/shuffle.c
index 9b5cd4b004b0f1d975624dd430ec971baf4dd1be..9c2e145a747affff680fef96931ad2c7718502a7 100644
@@ -60,7 +60,7 @@ static struct page * __meminit shuffle_valid_page(struct zone *zone,
         * ...is the page on the same list as the page we will
         * shuffle it with?
         */
-       if (page_order(page) != order)
+       if (buddy_order(page) != order)
                return NULL;
 
        return page;