mm: optimize put_mems_allowed() usage
author Mel Gorman <mgorman@suse.de>
Thu, 3 Apr 2014 21:47:24 +0000 (14:47 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 3 Apr 2014 23:20:58 +0000 (16:20 -0700)
Since put_mems_allowed() is strictly optional (it is a seqcount retry), we
don't need to evaluate the function if the allocation was in fact
successful, saving an smp_rmb, some loads, and some comparisons on
relatively fast paths.

Since the naming get/put_mems_allowed() suggests a mandatory pairing,
rename the interface, as suggested by Mel, to resemble the seqcount
interface.

This gives us read_mems_allowed_begin() and read_mems_allowed_retry(),
where it is important to note that the return value of the latter call
is inverted from its previous incarnation.
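
To make the conversion concrete, here is a minimal user-space sketch of
the new call-site pattern, assuming a bare sequence counter in place of
the kernel's current->mems_allowed_seq; try_alloc(), main() and the
counter are hypothetical stand-ins, and the real wrappers are in the
include/linux/cpuset.h hunk below:

        /*
         * Illustrative model only: the kernel wraps read_seqcount_begin()
         * and read_seqcount_retry() on current->mems_allowed_seq.
         */
        #include <stdbool.h>
        #include <stdio.h>
        #include <stdlib.h>

        static unsigned int mems_allowed_seq;   /* bumped by the (omitted) writer */

        static unsigned int read_mems_allowed_begin(void)
        {
                return mems_allowed_seq;
        }

        /*
         * Returns true when a concurrent update occurred and the caller
         * should retry -- inverted from the old put_mems_allowed(), which
         * returned true when NO retry was needed.
         */
        static bool read_mems_allowed_retry(unsigned int seq)
        {
                return mems_allowed_seq != seq;
        }

        static void *try_alloc(void)
        {
                return malloc(64);      /* stand-in for the page/object allocation */
        }

        int main(void)
        {
                unsigned int cookie;
                void *obj;

                do {
                        cookie = read_mems_allowed_begin();
                        obj = try_alloc();
                        /*
                         * Test !obj first: on the common successful path the
                         * retry check (an smp_rmb plus a load and compare in
                         * the kernel) is short-circuited away entirely.
                         */
                } while (!obj && read_mems_allowed_retry(cookie));

                printf("allocation %s\n", obj ? "succeeded" : "failed");
                free(obj);
                return 0;
        }

The old pattern, while (!put_mems_allowed(cookie) && !page), evaluated
the seqcount retry unconditionally; swapping the operand order means a
successful allocation never touches the counter.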

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/cpuset.h
kernel/cpuset.c
mm/filemap.c
mm/hugetlb.c
mm/mempolicy.c
mm/page_alloc.c
mm/slab.c
mm/slub.c

index 3fe661fe96d13e534f2b1ece47a409b1609f055c..b19d3dc2e6519498c9e32c07ee23275060bbc5d1 100644 (file)
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -87,25 +87,26 @@ extern void rebuild_sched_domains(void);
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
 /*
- * get_mems_allowed is required when making decisions involving mems_allowed
- * such as during page allocation. mems_allowed can be updated in parallel
- * and depending on the new value an operation can fail potentially causing
- * process failure. A retry loop with get_mems_allowed and put_mems_allowed
- * prevents these artificial failures.
+ * read_mems_allowed_begin is required when making decisions involving
+ * mems_allowed such as during page allocation. mems_allowed can be updated in
+ * parallel and depending on the new value an operation can fail potentially
+ * causing process failure. A retry loop with read_mems_allowed_begin and
+ * read_mems_allowed_retry prevents these artificial failures.
  */
-static inline unsigned int get_mems_allowed(void)
+static inline unsigned int read_mems_allowed_begin(void)
 {
        return read_seqcount_begin(&current->mems_allowed_seq);
 }
 
 /*
- * If this returns false, the operation that took place after get_mems_allowed
- * may have failed. It is up to the caller to retry the operation if
+ * If this returns true, the operation that took place after
+ * read_mems_allowed_begin may have failed artificially due to a concurrent
+ * update of mems_allowed. It is up to the caller to retry the operation if
  * appropriate.
  */
-static inline bool put_mems_allowed(unsigned int seq)
+static inline bool read_mems_allowed_retry(unsigned int seq)
 {
-       return !read_seqcount_retry(&current->mems_allowed_seq, seq);
+       return read_seqcount_retry(&current->mems_allowed_seq, seq);
 }
 
 static inline void set_mems_allowed(nodemask_t nodemask)
@@ -225,14 +226,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 {
 }
 
-static inline unsigned int get_mems_allowed(void)
+static inline unsigned int read_mems_allowed_begin(void)
 {
        return 0;
 }
 
-static inline bool put_mems_allowed(unsigned int seq)
+static inline bool read_mems_allowed_retry(unsigned int seq)
 {
-       return true;
+       return false;
 }
 
 #endif /* !CONFIG_CPUSETS */
index e6b1b66afe526acfa2a9ecbfc5bad9da76e8d9b4..f6fc7475f1a1f3c3b6596b4266876959e6577944 100644 (file)
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1022,7 +1022,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
        task_lock(tsk);
        /*
         * Determine if a loop is necessary if another thread is doing
-        * get_mems_allowed().  If at least one node remains unchanged and
+        * read_mems_allowed_begin().  If at least one node remains unchanged and
         * tsk does not have a mempolicy, then an empty nodemask will not be
         * possible when mems_allowed is larger than a word.
         */
index 7a13f6ac5421b9fead7729df856556b12269ad8c..068cd2a63d3252b727a52c494ec619804ea453f7 100644 (file)
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -520,10 +520,10 @@ struct page *__page_cache_alloc(gfp_t gfp)
        if (cpuset_do_page_mem_spread()) {
                unsigned int cpuset_mems_cookie;
                do {
-                       cpuset_mems_cookie = get_mems_allowed();
+                       cpuset_mems_cookie = read_mems_allowed_begin();
                        n = cpuset_mem_spread_node();
                        page = alloc_pages_exact_node(n, gfp, 0);
-               } while (!put_mems_allowed(cpuset_mems_cookie) && !page);
+               } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 
                return page;
        }
index c01cb9fedb18f8495c815d4caa380580734e1c8e..139b7462203bd715aefd224895e034b690a63bcf 100644 (file)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -540,7 +540,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
                goto err;
 
 retry_cpuset:
-       cpuset_mems_cookie = get_mems_allowed();
+       cpuset_mems_cookie = read_mems_allowed_begin();
        zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask(h), &mpol, &nodemask);
 
@@ -562,7 +562,7 @@ retry_cpuset:
        }
 
        mpol_cond_put(mpol);
-       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
        return page;
 
index 4755c857694246ca6365a382931bcd431af9bda2..e3ab02822799d2527949fc4476a380820aec5371 100644 (file)
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1899,7 +1899,7 @@ int node_random(const nodemask_t *maskp)
  * If the effective policy is 'BIND, returns a pointer to the mempolicy's
  * @nodemask for filtering the zonelist.
  *
- * Must be protected by get_mems_allowed()
+ * Must be protected by read_mems_allowed_begin()
  */
 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
                                gfp_t gfp_flags, struct mempolicy **mpol,
@@ -2063,7 +2063,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 retry_cpuset:
        pol = get_vma_policy(current, vma, addr);
-       cpuset_mems_cookie = get_mems_allowed();
+       cpuset_mems_cookie = read_mems_allowed_begin();
 
        if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
                unsigned nid;
@@ -2071,7 +2071,7 @@ retry_cpuset:
                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
                mpol_cond_put(pol);
                page = alloc_page_interleave(gfp, order, nid);
-               if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+               if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                        goto retry_cpuset;
 
                return page;
@@ -2081,7 +2081,7 @@ retry_cpuset:
                                      policy_nodemask(gfp, pol));
        if (unlikely(mpol_needs_cond_ref(pol)))
                __mpol_put(pol);
-       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
        return page;
 }
@@ -2115,7 +2115,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
                pol = &default_policy;
 
 retry_cpuset:
-       cpuset_mems_cookie = get_mems_allowed();
+       cpuset_mems_cookie = read_mems_allowed_begin();
 
        /*
         * No reference counting needed for current->mempolicy
@@ -2128,7 +2128,7 @@ retry_cpuset:
                                policy_zonelist(gfp, pol, numa_node_id()),
                                policy_nodemask(gfp, pol));
 
-       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
 
        return page;
index 3bac76ae4b30ec8a62042bdff9644e87ee181d3c..979378deccbfcc22fc1a120e08248dbad1053ab3 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2739,7 +2739,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                return NULL;
 
 retry_cpuset:
-       cpuset_mems_cookie = get_mems_allowed();
+       cpuset_mems_cookie = read_mems_allowed_begin();
 
        /* The preferred zone is used for statistics later */
        first_zones_zonelist(zonelist, high_zoneidx,
@@ -2777,7 +2777,7 @@ out:
         * the mask is being updated. If a page allocation is about to fail,
         * check if the cpuset changed during allocation and if so, retry.
         */
-       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
 
        memcg_kmem_commit_charge(page, memcg, order);
@@ -3045,9 +3045,9 @@ bool skip_free_areas_node(unsigned int flags, int nid)
                goto out;
 
        do {
-               cpuset_mems_cookie = get_mems_allowed();
+               cpuset_mems_cookie = read_mems_allowed_begin();
                ret = !node_isset(nid, cpuset_current_mems_allowed);
-       } while (!put_mems_allowed(cpuset_mems_cookie));
+       } while (read_mems_allowed_retry(cpuset_mems_cookie));
 out:
        return ret;
 }
index b264214c77ead2f8426ae476354231dc7c52169a..9153c802e2fee1898c62ea1cdcb5efecb3bbc2ed 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3073,7 +3073,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
        local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
 retry_cpuset:
-       cpuset_mems_cookie = get_mems_allowed();
+       cpuset_mems_cookie = read_mems_allowed_begin();
        zonelist = node_zonelist(slab_node(), flags);
 
 retry:
@@ -3131,7 +3131,7 @@ retry:
                }
        }
 
-       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
+       if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
        return obj;
 }
index 25f14ad8f817443bda93901aa8ab0b626280f297..7611f148ee81e1b7f4f86c19358e1fff07c75f9a 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1684,7 +1684,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
                return NULL;
 
        do {
-               cpuset_mems_cookie = get_mems_allowed();
+               cpuset_mems_cookie = read_mems_allowed_begin();
                zonelist = node_zonelist(slab_node(), flags);
                for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
                        struct kmem_cache_node *n;
@@ -1696,19 +1696,17 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
                                object = get_partial_node(s, n, c, flags);
                                if (object) {
                                        /*
-                                        * Return the object even if
-                                        * put_mems_allowed indicated that
-                                        * the cpuset mems_allowed was
-                                        * updated in parallel. It's a
-                                        * harmless race between the alloc
-                                        * and the cpuset update.
+                                        * Don't check read_mems_allowed_retry()
+                                        * here - if mems_allowed was updated in
+                                        * parallel, that was a harmless race
+                                        * between allocation and the cpuset
+                                        * update
                                         */
-                                       put_mems_allowed(cpuset_mems_cookie);
                                        return object;
                                }
                        }
                }
-       } while (!put_mems_allowed(cpuset_mems_cookie));
+       } while (read_mems_allowed_retry(cpuset_mems_cookie));
 #endif
        return NULL;
 }