mm: vmstat: add cma statistics
author  Minchan Kim <minchan@kernel.org>
Wed, 5 May 2021 01:37:19 +0000 (18:37 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
Wed, 5 May 2021 18:27:24 +0000 (11:27 -0700)
Since CMA is used more widely, it's worth having CMA allocation
statistics in vmstat.  With them, we can see how aggressively the
system uses CMA allocation and how often it fails.

Link: https://lkml.kernel.org/r/20210302183346.3707237-1-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: John Dias <joaodias@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/vm_event_item.h
mm/cma.c
mm/vmstat.c
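
The accounting lands inside cma_alloc() itself (see the mm/cma.c hunks
below), so existing callers need no changes to be counted.  As a rough
illustration, a hypothetical kernel-side caller might look like the sketch
below; my_drv_alloc_buffer, region and nr_pages are made-up names, while
cma_alloc() is the existing CMA API:

#include <linux/cma.h>

/*
 * Hypothetical driver helper: 'region' is assumed to have been set up
 * at boot (e.g. via cma_declare_contiguous()).  After this patch every
 * call below bumps exactly one of CMA_ALLOC_SUCCESS or CMA_ALLOC_FAIL
 * via count_vm_event(), including the early-exit paths that previously
 * returned NULL before reaching any accounting.
 */
static struct page *my_drv_alloc_buffer(struct cma *region, size_t nr_pages)
{
	return cma_alloc(region, nr_pages, 0, false);
}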

index 18e75974d4e37bd76f6b31d88951a9cededaec63..21d7c7f72f1c728d03ee6274fe2cde5287d5a9b9 100644 (file)
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -70,6 +70,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #endif
 #ifdef CONFIG_HUGETLB_PAGE
                HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
+#endif
+#ifdef CONFIG_CMA
+               CMA_ALLOC_SUCCESS,
+               CMA_ALLOC_FAIL,
 #endif
                UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
                UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
index acd6991f77a00111185eec72224ccdbb7ab512c7..b44a71eb31748d97bc8f33056488bfb45ff948fe 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -435,13 +435,13 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
        int ret = -ENOMEM;
 
        if (!cma || !cma->count || !cma->bitmap)
-               return NULL;
+               goto out;
 
        pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
                 count, align);
 
        if (!count)
-               return NULL;
+               goto out;
 
        mask = cma_bitmap_aligned_mask(cma, align);
        offset = cma_bitmap_aligned_offset(cma, align);
@@ -449,7 +449,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
        if (bitmap_count > bitmap_maxno)
-               return NULL;
+               goto out;
 
        for (;;) {
                spin_lock_irq(&cma->lock);
@@ -506,6 +506,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
        }
 
        pr_debug("%s(): returned %p\n", __func__, page);
+out:
+       if (page)
+               count_vm_event(CMA_ALLOC_SUCCESS);
+       else
+               count_vm_event(CMA_ALLOC_FAIL);
+
        return page;
 }
 
index 74b2c374b86c8a1728a9a7e118af830796ea9691..49a8456ec079bbf882fbf034e803c993c9557abd 100644 (file)
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1312,6 +1312,10 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_HUGETLB_PAGE
        "htlb_buddy_alloc_success",
        "htlb_buddy_alloc_fail",
+#endif
+#ifdef CONFIG_CMA
+       "cma_alloc_success",
+       "cma_alloc_fail",
 #endif
        "unevictable_pgs_culled",
        "unevictable_pgs_scanned",