mm, oom: give __GFP_NOFAIL allocations access to memory reserves
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1e9a560654002cb0138316b6b2e0e9c65586280c..ce63d603820f1963fac120ce76e20583ca695f6e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -805,7 +805,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                do {
                        int mt; /* migratetype of the to-be-freed page */
 
-                       page = list_entry(list->prev, struct page, lru);
+                       page = list_last_entry(list, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
 
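This hunk, like the hot/cold pick in buffered_rmqueue() further down, is a purely mechanical conversion: the open-coded list_entry(list->prev, ...) becomes the self-describing list_last_entry(list, ...). The two are exactly equivalent, as the following standalone userspace sketch shows; it re-creates the kernel macros locally (the real definitions live in <linux/list.h>) and uses a stripped-down, hypothetical stand-in for struct page:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member)      container_of(ptr, type, member)
    #define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member)

    /* Stand-in for struct page: just enough to hang an lru list on. */
    struct page { unsigned long pfn; struct list_head lru; };

    int main(void)
    {
            struct page a = { .pfn = 1 }, b = { .pfn = 2 };
            struct list_head list = { .next = &a.lru, .prev = &b.lru };

            a.lru = (struct list_head){ .next = &b.lru, .prev = &list };
            b.lru = (struct list_head){ .next = &list, .prev = &a.lru };

            /* Both spellings resolve to the tail element (pfn 2); the named
             * helper just states the intent directly. */
            printf("%lu %lu\n",
                   list_entry(list.prev, struct page, lru)->pfn,
                   list_last_entry(&list, struct page, lru)->pfn);
            return 0;
    }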
@@ -1410,11 +1410,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
-               if (list_empty(&area->free_list[migratetype]))
-                       continue;
-
-               page = list_entry(area->free_list[migratetype].next,
+               page = list_first_entry_or_null(&area->free_list[migratetype],
                                                        struct page, lru);
+               if (!page)
+                       continue;
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
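list_first_entry_or_null() folds the list_empty() test and the list_entry() dereference into a single expression, so an empty free list now surfaces as NULL and the continue keys off that instead. Paraphrasing the helper chain from <linux/list.h> (list_empty() is really an inline function, and the exact definitions drift between kernel versions):

    /* An empty list is one whose head points back at itself. */
    #define list_empty(head)        ((head)->next == (head))

    #define list_first_entry(ptr, type, member) \
            list_entry((ptr)->next, type, member)

    /* NULL when the list is empty, otherwise the first real entry. */
    #define list_first_entry_or_null(ptr, type, member) \
            (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)

The same conversion repeats in the unreserve_highatomic_pageblock() and __rmqueue_fallback() hunks below.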
@@ -1693,12 +1692,12 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
                for (order = 0; order < MAX_ORDER; order++) {
                        struct free_area *area = &(zone->free_area[order]);
 
-                       if (list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
+                       page = list_first_entry_or_null(
+                                       &area->free_list[MIGRATE_HIGHATOMIC],
+                                       struct page, lru);
+                       if (!page)
                                continue;
 
-                       page = list_entry(area->free_list[MIGRATE_HIGHATOMIC].next,
-                                               struct page, lru);
-
                        /*
                         * It should never happen but changes to locking could
                         * inadvertently allow a per-cpu drain to add pages
@@ -1746,7 +1745,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
                if (fallback_mt == -1)
                        continue;
 
-               page = list_entry(area->free_list[fallback_mt].next,
+               page = list_first_entry(&area->free_list[fallback_mt],
                                                struct page, lru);
                if (can_steal)
                        steal_suitable_fallback(zone, page, start_migratetype);
@@ -1781,7 +1780,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
  * Call me with the zone->lock already held.
  */
 static struct page *__rmqueue(struct zone *zone, unsigned int order,
-                               int migratetype, gfp_t gfp_flags)
+                               int migratetype)
 {
        struct page *page;
 
@@ -1811,7 +1810,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 
        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
-               struct page *page = __rmqueue(zone, order, migratetype, 0);
+               struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
                        break;
 
@@ -1981,7 +1980,7 @@ void mark_free_pages(struct zone *zone)
        unsigned long pfn, max_zone_pfn;
        unsigned long flags;
        unsigned int order, t;
-       struct list_head *curr;
+       struct page *page;
 
        if (zone_is_empty(zone))
                return;
@@ -1991,17 +1990,17 @@ void mark_free_pages(struct zone *zone)
        max_zone_pfn = zone_end_pfn(zone);
        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                if (pfn_valid(pfn)) {
-                       struct page *page = pfn_to_page(pfn);
-
+                       page = pfn_to_page(pfn);
                        if (!swsusp_page_is_forbidden(page))
                                swsusp_unset_page_free(page);
                }
 
        for_each_migratetype_order(order, t) {
-               list_for_each(curr, &zone->free_area[order].free_list[t]) {
+               list_for_each_entry(page,
+                               &zone->free_area[order].free_list[t], lru) {
                        unsigned long i;
 
-                       pfn = page_to_pfn(list_entry(curr, struct page, lru));
+                       pfn = page_to_pfn(page);
                        for (i = 0; i < (1UL << order); i++)
                                swsusp_set_page_free(pfn_to_page(pfn + i));
                }
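list_for_each_entry() iterates over the containing structures directly, which retires both the intermediate struct list_head *curr cursor and the per-iteration list_entry() conversion. For reference, in this kernel generation the macro expands roughly to:

    #define list_for_each_entry(pos, head, member)                      \
            for (pos = list_first_entry(head, typeof(*pos), member);    \
                 &pos->member != (head);                                \
                 pos = list_next_entry(pos, member))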
@@ -2205,9 +2204,9 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                }
 
                if (cold)
-                       page = list_entry(list->prev, struct page, lru);
+                       page = list_last_entry(list, struct page, lru);
                else
-                       page = list_entry(list->next, struct page, lru);
+                       page = list_first_entry(list, struct page, lru);
 
                list_del(&page->lru);
                pcp->count--;
@@ -2234,7 +2233,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                                trace_mm_page_alloc_zone_locked(page, order, migratetype);
                }
                if (!page)
-                       page = __rmqueue(zone, order, migratetype, gfp_flags);
+                       page = __rmqueue(zone, order, migratetype);
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
@@ -2733,8 +2732,21 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                        goto out;
        }
        /* Exhausted what can be done so it's blamo time */
-       if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
+       if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
                *did_some_progress = 1;
+
+               if (gfp_mask & __GFP_NOFAIL) {
+                       page = get_page_from_freelist(gfp_mask, order,
+                                       ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
+                       /*
+                        * fallback to ignore cpuset restriction if our nodes
+                        * are depleted
+                        */
+                       if (!page)
+                               page = get_page_from_freelist(gfp_mask, order,
+                                       ALLOC_NO_WATERMARKS, ac);
+               }
+       }
 out:
        mutex_unlock(&oom_lock);
        return page;
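This last hunk is the change the subject line describes. If the OOM killer made progress, or it could not be invoked but the request is __GFP_NOFAIL (the WARN_ON_ONCE case), a NOFAIL allocation must not come back NULL, so before retrying it is now allowed to dip below the watermarks: first while still honouring the task's cpuset (ALLOC_NO_WATERMARKS|ALLOC_CPUSET), and only if the allowed nodes are truly depleted with the cpuset restriction dropped as well. The runnable userspace sketch below models that two-step ladder; the flag names mirror the kernel's, but the zone structure and predicate are invented stand-ins, not allocator internals:

    #include <stdbool.h>
    #include <stdio.h>

    enum { ALLOC_CPUSET = 1, ALLOC_NO_WATERMARKS = 2 };

    /* Invented stand-in: one "zone" reduced to the three properties the
     * fallback ladder cares about. */
    struct fake_zone { bool above_watermark, in_cpuset, has_pages; };

    static bool can_satisfy(const struct fake_zone *z, int flags)
    {
            if (!z->has_pages)
                    return false;
            if (!(flags & ALLOC_NO_WATERMARKS) && !z->above_watermark)
                    return false;   /* would eat into the reserve */
            if ((flags & ALLOC_CPUSET) && !z->in_cpuset)
                    return false;   /* node not allowed for this task */
            return true;
    }

    int main(void)
    {
            /* Worst case: the only free pages left are reserve pages on a
             * node outside the requesting task's cpuset. */
            struct fake_zone z = { .above_watermark = false,
                                   .in_cpuset = false, .has_pages = true };

            /* Step 1: tap reserves but honour the cpuset -> still fails. */
            printf("step 1: %d\n",
                   can_satisfy(&z, ALLOC_NO_WATERMARKS | ALLOC_CPUSET));
            /* Step 2: drop the cpuset too; failing a __GFP_NOFAIL request
             * here would be worse than breaching the cpuset. */
            printf("step 2: %d\n", can_satisfy(&z, ALLOC_NO_WATERMARKS));
            return 0;
    }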