diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2bd7541d7c11231431c060ca6cfe84a89f096fe3..ff73899af61a2c25582a66f56d473b0da6ce1b9c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -100,24 +100,7 @@ static bool do_memsw_account(void)
        return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
 }
 
-static const char * const mem_cgroup_stat_names[] = {
-       "cache",
-       "rss",
-       "rss_huge",
-       "mapped_file",
-       "dirty",
-       "writeback",
-       "swap",
-};
-
-static const char * const mem_cgroup_events_names[] = {
-       "pgpgin",
-       "pgpgout",
-       "pgfault",
-       "pgmajfault",
-};
-
-static const char * const mem_cgroup_lru_names[] = {
+static const char *const mem_cgroup_lru_names[] = {
        "inactive_anon",
        "active_anon",
        "inactive_file",
@@ -568,32 +551,15 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
  * common workload, threshold and synchronization as vmstat[] should be
  * implemented.
  */
-static unsigned long
-mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
-{
-       long val = 0;
-       int cpu;
-
-       /* Per-cpu values can be negative, use a signed accumulator */
-       for_each_possible_cpu(cpu)
-               val += per_cpu(memcg->stat->count[idx], cpu);
-       /*
-        * Summing races with updates, so val may be negative.  Avoid exposing
-        * transient negative values.
-        */
-       if (val < 0)
-               val = 0;
-       return val;
-}
 
-static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
-                                           enum mem_cgroup_events_index idx)
+static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
+                                     enum memcg_event_item event)
 {
        unsigned long val = 0;
        int cpu;
 
        for_each_possible_cpu(cpu)
-               val += per_cpu(memcg->stat->events[idx], cpu);
+               val += per_cpu(memcg->stat->events[event], cpu);
        return val;
 }
 
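The removed mem_cgroup_read_stat() and the renamed event reader share the same per-cpu summation pattern: each CPU updates its own slot and a reader totals all possible CPUs. For the stat counters, per-cpu deltas can be negative, so the removed reader summed into a signed accumulator and clamped transient negative totals to zero; event counters only ever increase, so memcg_sum_events() needs no clamp. A minimal userspace model of the stat-reader pattern, with NR_CPUS and the counter array as illustrative stand-ins for the kernel's per-cpu machinery:

/* Minimal model of per-cpu counter summation. The array stands in for
 * the kernel's per-cpu data; the clamp mirrors the removed
 * mem_cgroup_read_stat(). */
#include <stdio.h>

#define NR_CPUS 4

static long percpu_count[NR_CPUS];	/* one slot per CPU */

static unsigned long read_stat(void)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, so sum in a signed type. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		val += percpu_count[cpu];
	/* Summing races with updates; hide transient negative totals. */
	return val < 0 ? 0 : (unsigned long)val;
}

int main(void)
{
	percpu_count[0] = 5;
	percpu_count[1] = -2;	/* e.g. pages uncharged on another CPU */
	printf("%lu\n", read_stat());	/* prints 3 */
	return 0;
}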
@@ -606,23 +572,23 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
         * counted as CACHE even if it's on ANON LRU.
         */
        if (PageAnon(page))
-               __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
-                               nr_pages);
-       else
-               __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
-                               nr_pages);
+               __this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
+       else {
+               __this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
+               if (PageSwapBacked(page))
+                       __this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
+       }
 
        if (compound) {
                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-               __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
-                               nr_pages);
+               __this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
        }
 
        /* pagein of a big page is an event. So, ignore page size */
        if (nr_pages > 0)
-               __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
+               __this_cpu_inc(memcg->stat->events[PGPGIN]);
        else {
-               __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
+               __this_cpu_inc(memcg->stat->events[PGPGOUT]);
                nr_pages = -nr_pages; /* for event */
        }
 
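mem_cgroup_charge_statistics() encodes direction in the sign of nr_pages: positive means a charge (one PGPGIN event, regardless of page size), negative an uncharge (one PGPGOUT event), and the magnitude feeds nr_page_events either way. An illustrative userspace model of that sign convention (all names are stand-ins):

/* Model of the sign convention above: the sign of nr_pages selects the
 * event, the magnitude is what gets accumulated. */
#include <stdio.h>

static unsigned long pgpgin, pgpgout, nr_page_events;

static void charge_statistics(long nr_pages)
{
	if (nr_pages > 0)
		pgpgin++;		/* one event, whatever the size */
	else {
		pgpgout++;
		nr_pages = -nr_pages;	/* magnitude only, for the event count */
	}
	nr_page_events += nr_pages;
}

int main(void)
{
	charge_statistics(512);		/* charging a huge page: one pagein */
	charge_statistics(-1);		/* uncharging a base page: one pageout */
	printf("in=%lu out=%lu events=%lu\n", pgpgin, pgpgout, nr_page_events);
	return 0;
}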
@@ -1144,6 +1110,28 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
        return false;
 }
 
+unsigned int memcg1_stats[] = {
+       MEMCG_CACHE,
+       MEMCG_RSS,
+       MEMCG_RSS_HUGE,
+       NR_SHMEM,
+       NR_FILE_MAPPED,
+       NR_FILE_DIRTY,
+       NR_WRITEBACK,
+       MEMCG_SWAP,
+};
+
+static const char *const memcg1_stat_names[] = {
+       "cache",
+       "rss",
+       "rss_huge",
+       "shmem",
+       "mapped_file",
+       "dirty",
+       "writeback",
+       "swap",
+};
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /**
  * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
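memcg1_stats mixes ids from two enums, memcg-private items such as MEMCG_CACHE and global node counters such as NR_SHMEM and NR_FILE_DIRTY, while memcg1_stat_names carries the cgroup1 file names in the same order, so both tables are always indexed with the same i (the BUILD_BUG_ON added in memcg_stat_show below enforces equal lengths). A userspace sketch of this parallel-table idiom, all names illustrative:

/* Sketch of the parallel-table idiom: ids and names share an index,
 * ARRAY_SIZE drives the loop, and a compile-time check keeps the
 * tables aligned. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum { STAT_CACHE, STAT_RSS, STAT_SWAP, NR_STATS };

static const unsigned int stats[] = { STAT_CACHE, STAT_RSS, STAT_SWAP };
static const char *const stat_names[] = { "cache", "rss", "swap" };

/* Illustrative stand-in for memcg_page_state(). */
static unsigned long page_state(unsigned int id)
{
	static const unsigned long counts[NR_STATS] = { 100, 50, 3 };
	return counts[id];
}

int main(void)
{
	unsigned int i;

	_Static_assert(ARRAY_SIZE(stats) == ARRAY_SIZE(stat_names),
		       "tables out of sync");
	for (i = 0; i < ARRAY_SIZE(stats); i++)
		printf("%s %lu\n", stat_names[i], page_state(stats[i]));
	return 0;
}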
@@ -1188,11 +1176,11 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
                pr_cont_cgroup_path(iter->css.cgroup);
                pr_cont(":");
 
-               for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-                       if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
+               for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+                       if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
                                continue;
-                       pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
-                               K(mem_cgroup_read_stat(iter, i)));
+                       pr_cont(" %s:%luKB", memcg1_stat_names[i],
+                               K(memcg_page_state(iter, memcg1_stats[i])));
                }
 
                for (i = 0; i < NR_LRU_LISTS; i++)
@@ -1837,7 +1825,7 @@ static void reclaim_high(struct mem_cgroup *memcg,
        do {
                if (page_counter_read(&memcg->memory) <= memcg->high)
                        continue;
-               mem_cgroup_events(memcg, MEMCG_HIGH, 1);
+               mem_cgroup_event(memcg, MEMCG_HIGH);
                try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
        } while ((memcg = parent_mem_cgroup(memcg)));
 }
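reclaim_high() walks from the failing group up through its ancestors; parent_mem_cgroup() yields NULL at the root, which terminates the do/while. A self-contained model of that walk (struct group and the names are illustrative):

/* Model of the ancestor walk in reclaim_high(): reclaim wherever usage
 * exceeds the high limit, stop when the parent pointer runs out. */
#include <stdio.h>
#include <stddef.h>

struct group {
	const char *name;
	unsigned long usage, high;
	struct group *parent;
};

int main(void)
{
	struct group root = { "root", 10, 100, NULL };
	struct group mid  = { "mid",  90, 50, &root };
	struct group leaf = { "leaf", 30, 40, &mid };
	struct group *g = &leaf;

	do {
		if (g->usage <= g->high)
			continue;	/* within limit; still check ancestors */
		printf("reclaiming in %s\n", g->name);	/* only "mid" here */
	} while ((g = g->parent));
	return 0;
}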
@@ -1928,7 +1916,7 @@ retry:
        if (!gfpflags_allow_blocking(gfp_mask))
                goto nomem;
 
-       mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
+       mem_cgroup_event(mem_over_limit, MEMCG_MAX);
 
        nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
                                                    gfp_mask, may_swap);
@@ -1971,7 +1959,7 @@ retry:
        if (fatal_signal_pending(current))
                goto force;
 
-       mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
+       mem_cgroup_event(mem_over_limit, MEMCG_OOM);
 
        mem_cgroup_oom(mem_over_limit, gfp_mask,
                       get_order(nr_pages * PAGE_SIZE));
@@ -2381,7 +2369,7 @@ void mem_cgroup_split_huge_fixup(struct page *head)
        for (i = 1; i < HPAGE_PMD_NR; i++)
                head[i].mem_cgroup = head->mem_cgroup;
 
-       __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+       __this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE],
                       HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
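When a huge page is split, each tail page inherits the head's group pointer and the huge-rss counter drops by HPAGE_PMD_NR base pages (512 with 4 KiB base pages and 2 MiB THP). An illustrative model of the fixup, with an integer group id standing in for the mem_cgroup pointer:

/* Model of mem_cgroup_split_huge_fixup(): tails inherit the head's
 * group, and the huge-rss counter drops by one huge page's worth of
 * base pages. */
#include <stdio.h>

#define HPAGE_PMD_NR 512	/* assumes 4 KiB pages, 2 MiB THP */

struct page { int mem_cgroup; };

int main(void)
{
	struct page head[HPAGE_PMD_NR];
	long rss_huge = HPAGE_PMD_NR;
	int i;

	head[0].mem_cgroup = 7;			/* group id of the head page */
	for (i = 1; i < HPAGE_PMD_NR; i++)	/* tails inherit the group */
		head[i].mem_cgroup = head[0].mem_cgroup;
	rss_huge -= HPAGE_PMD_NR;		/* no longer a huge mapping */
	printf("tail group %d, rss_huge %ld\n", head[511].mem_cgroup, rss_huge);
	return 0;
}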
@@ -2391,7 +2379,7 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
                                         bool charge)
 {
        int val = (charge) ? 1 : -1;
-       this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
+       this_cpu_add(memcg->stat->count[MEMCG_SWAP], val);
 }
 
 /**
@@ -2725,7 +2713,7 @@ static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
 
        for_each_mem_cgroup_tree(iter, memcg) {
                for (i = 0; i < MEMCG_NR_STAT; i++)
-                       stat[i] += mem_cgroup_read_stat(iter, i);
+                       stat[i] += memcg_page_state(iter, i);
        }
 }
 
@@ -2738,7 +2726,7 @@ static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
 
        for_each_mem_cgroup_tree(iter, memcg) {
                for (i = 0; i < MEMCG_NR_EVENTS; i++)
-                       events[i] += mem_cgroup_read_events(iter, i);
+                       events[i] += memcg_sum_events(iter, i);
        }
 }
 
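tree_stat() and tree_events() fold every counter index over a group and all of its descendants via for_each_mem_cgroup_tree(). A sketch of that accumulation, with a flat array standing in for the hierarchy iterator:

/* Sketch of subtree accumulation: every counter is summed over the
 * group itself plus its descendants. */
#include <stdio.h>

#define NR_STAT 3

struct group { unsigned long stat[NR_STAT]; };

static void tree_stat(struct group **subtree, int n, unsigned long *out)
{
	int g, i;

	for (i = 0; i < NR_STAT; i++)
		out[i] = 0;
	for (g = 0; g < n; g++)			/* group + descendants */
		for (i = 0; i < NR_STAT; i++)
			out[i] += subtree[g]->stat[i];
}

int main(void)
{
	struct group a = { { 1, 2, 3 } }, b = { { 10, 20, 30 } };
	struct group *subtree[] = { &a, &b };
	unsigned long total[NR_STAT];

	tree_stat(subtree, 2, total);
	printf("%lu %lu %lu\n", total[0], total[1], total[2]);
	return 0;
}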
@@ -2750,13 +2738,10 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
                struct mem_cgroup *iter;
 
                for_each_mem_cgroup_tree(iter, memcg) {
-                       val += mem_cgroup_read_stat(iter,
-                                       MEM_CGROUP_STAT_CACHE);
-                       val += mem_cgroup_read_stat(iter,
-                                       MEM_CGROUP_STAT_RSS);
+                       val += memcg_page_state(iter, MEMCG_CACHE);
+                       val += memcg_page_state(iter, MEMCG_RSS);
                        if (swap)
-                               val += mem_cgroup_read_stat(iter,
-                                               MEM_CGROUP_STAT_SWAP);
+                               val += memcg_page_state(iter, MEMCG_SWAP);
                }
        } else {
                if (!swap)
@@ -3131,6 +3116,21 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
 }
 #endif /* CONFIG_NUMA */
 
+/* Universal VM events cgroup1 shows, original sort order */
+unsigned int memcg1_events[] = {
+       PGPGIN,
+       PGPGOUT,
+       PGFAULT,
+       PGMAJFAULT,
+};
+
+static const char *const memcg1_event_names[] = {
+       "pgpgin",
+       "pgpgout",
+       "pgfault",
+       "pgmajfault",
+};
+
 static int memcg_stat_show(struct seq_file *m, void *v)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
@@ -3138,22 +3138,20 @@ static int memcg_stat_show(struct seq_file *m, void *v)
        struct mem_cgroup *mi;
        unsigned int i;
 
-       BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
-                    MEM_CGROUP_STAT_NSTATS);
-       BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
-                    MEM_CGROUP_EVENTS_NSTATS);
+       BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
        BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 
-       for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-               if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+       for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
+               if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
                        continue;
-               seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
-                          mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
+               seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
+                          memcg_page_state(memcg, memcg1_stats[i]) *
+                          PAGE_SIZE);
        }
 
-       for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
-               seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
-                          mem_cgroup_read_events(memcg, i));
+       for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
+               seq_printf(m, "%s %lu\n", memcg1_event_names[i],
+                          memcg_sum_events(memcg, memcg1_events[i]));
 
        for (i = 0; i < NR_LRU_LISTS; i++)
                seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
@@ -3171,23 +3169,23 @@ static int memcg_stat_show(struct seq_file *m, void *v)
                seq_printf(m, "hierarchical_memsw_limit %llu\n",
                           (u64)memsw * PAGE_SIZE);
 
-       for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+       for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
                unsigned long long val = 0;
 
-               if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+               if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
                        continue;
                for_each_mem_cgroup_tree(mi, memcg)
-                       val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-               seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
+                       val += memcg_page_state(mi, memcg1_stats[i]) *
+                       PAGE_SIZE;
+               seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
        }
 
-       for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+       for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
                unsigned long long val = 0;
 
                for_each_mem_cgroup_tree(mi, memcg)
-                       val += mem_cgroup_read_events(mi, i);
-               seq_printf(m, "total_%s %llu\n",
-                          mem_cgroup_events_names[i], val);
+                       val += memcg_sum_events(mi, memcg1_events[i]);
+               seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
        }
 
        for (i = 0; i < NR_LRU_LISTS; i++) {
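The per-group values are page counts, so they are scaled by PAGE_SIZE for display, and the hierarchical totals accumulate into unsigned long long: on a 32-bit kernel a subtree can easily hold more bytes than fit in unsigned long. A small illustration with arbitrary figures:

/* Why the hierarchical totals use a 64-bit accumulator: pages *
 * PAGE_SIZE can exceed 2^32 once a subtree holds more than 4 GiB. */
#include <stdio.h>

#define PAGE_SIZE_BYTES 4096UL

int main(void)
{
	unsigned long pages = 2ul * 1024 * 1024;	/* 8 GiB of 4 KiB pages */
	unsigned long long val = 0;

	/* widen before multiplying so the product cannot wrap */
	val += (unsigned long long)pages * PAGE_SIZE_BYTES;
	printf("total %llu\n", val);	/* 8589934592, > 2^32 */
	return 0;
}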
@@ -3652,10 +3650,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
        struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
        struct mem_cgroup *parent;
 
-       *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+       *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
 
        /* this should eventually include NR_UNSTABLE_NFS */
-       *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+       *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
        *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
                                                     (1 << LRU_ACTIVE_FILE));
        *pheadroom = PAGE_COUNTER_MAX;
@@ -4511,33 +4509,29 @@ static int mem_cgroup_move_account(struct page *page,
        spin_lock_irqsave(&from->move_lock, flags);
 
        if (!anon && page_mapped(page)) {
-               __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-                              nr_pages);
-               __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
-                              nr_pages);
+               __this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages);
+               __this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages);
        }
 
        /*
         * move_lock grabbed above and caller set from->moving_account, so
-        * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
+        * mod_memcg_page_state will serialize updates to PageDirty.
         * So mapping should be stable for dirty pages.
         */
        if (!anon && PageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
 
                if (mapping_cap_account_dirty(mapping)) {
-                       __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
+                       __this_cpu_sub(from->stat->count[NR_FILE_DIRTY],
                                       nr_pages);
-                       __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
+                       __this_cpu_add(to->stat->count[NR_FILE_DIRTY],
                                       nr_pages);
                }
        }
 
        if (PageWriteback(page)) {
-               __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-                              nr_pages);
-               __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-                              nr_pages);
+               __this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages);
+               __this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages);
        }
 
        /*
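mem_cgroup_move_account() transfers each page-state counter by subtracting nr_pages on the source group and adding the same amount on the destination, all under from->move_lock, so the combined total is invariant. A single-threaded userspace model with the locking elided:

/* Model of the counter transfer above: the same delta leaves one group
 * and enters the other, so the sum across groups never changes. */
#include <stdio.h>

struct group { long file_mapped; };

static void move_account(struct group *from, struct group *to, long nr_pages)
{
	from->file_mapped -= nr_pages;
	to->file_mapped += nr_pages;
}

int main(void)
{
	struct group a = { 8 }, b = { 0 };

	move_account(&a, &b, 3);
	printf("a=%ld b=%ld sum=%ld\n", a.file_mapped, b.file_mapped,
	       a.file_mapped + b.file_mapped);	/* sum stays 8 */
	return 0;
}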
@@ -5154,7 +5148,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
                        continue;
                }
 
-               mem_cgroup_events(memcg, MEMCG_OOM, 1);
+               mem_cgroup_event(memcg, MEMCG_OOM);
                if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
                        break;
        }
@@ -5167,10 +5161,10 @@ static int memory_events_show(struct seq_file *m, void *v)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
 
-       seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
-       seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
-       seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
-       seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
+       seq_printf(m, "low %lu\n", memcg_sum_events(memcg, MEMCG_LOW));
+       seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH));
+       seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX));
+       seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM));
 
        return 0;
 }
@@ -5197,9 +5191,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
        tree_events(memcg, events);
 
        seq_printf(m, "anon %llu\n",
-                  (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
+                  (u64)stat[MEMCG_RSS] * PAGE_SIZE);
        seq_printf(m, "file %llu\n",
-                  (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
+                  (u64)stat[MEMCG_CACHE] * PAGE_SIZE);
        seq_printf(m, "kernel_stack %llu\n",
                   (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
        seq_printf(m, "slab %llu\n",
@@ -5208,12 +5202,14 @@ static int memory_stat_show(struct seq_file *m, void *v)
        seq_printf(m, "sock %llu\n",
                   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
 
+       seq_printf(m, "shmem %llu\n",
+                  (u64)stat[NR_SHMEM] * PAGE_SIZE);
        seq_printf(m, "file_mapped %llu\n",
-                  (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
+                  (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE);
        seq_printf(m, "file_dirty %llu\n",
-                  (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
+                  (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE);
        seq_printf(m, "file_writeback %llu\n",
-                  (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
+                  (u64)stat[NR_WRITEBACK] * PAGE_SIZE);
 
        for (i = 0; i < NR_LRU_LISTS; i++) {
                struct mem_cgroup *mi;
@@ -5232,10 +5228,15 @@ static int memory_stat_show(struct seq_file *m, void *v)
 
        /* Accumulated memory events */
 
-       seq_printf(m, "pgfault %lu\n",
-                  events[MEM_CGROUP_EVENTS_PGFAULT]);
-       seq_printf(m, "pgmajfault %lu\n",
-                  events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+       seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
+       seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
+
+       seq_printf(m, "workingset_refault %lu\n",
+                  stat[WORKINGSET_REFAULT]);
+       seq_printf(m, "workingset_activate %lu\n",
+                  stat[WORKINGSET_ACTIVATE]);
+       seq_printf(m, "workingset_nodereclaim %lu\n",
+                  stat[WORKINGSET_NODERECLAIM]);
 
        return 0;
 }
@@ -5476,8 +5477,8 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
 
 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
                           unsigned long nr_anon, unsigned long nr_file,
-                          unsigned long nr_huge, unsigned long nr_kmem,
-                          struct page *dummy_page)
+                          unsigned long nr_kmem, unsigned long nr_huge,
+                          unsigned long nr_shmem, struct page *dummy_page)
 {
        unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
        unsigned long flags;
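A note on this signature change: nr_kmem and nr_huge trade places and nr_shmem is added, and because the parameters are all unsigned long, a caller left on the old argument order would still compile, which is presumably why every call site is updated in the same patch. A hypothetical miniature of that hazard:

/* With adjacent same-typed parameters the compiler accepts swapped
 * arguments silently; names here are invented for illustration. */
#include <stdio.h>

static void batch(unsigned long nr_kmem, unsigned long nr_huge)
{
	printf("kmem=%lu huge=%lu\n", nr_kmem, nr_huge);
}

int main(void)
{
	unsigned long nr_kmem = 1, nr_huge = 512;

	batch(nr_huge, nr_kmem);	/* compiles fine, counters swapped */
	batch(nr_kmem, nr_huge);	/* the intended order */
	return 0;
}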
@@ -5492,10 +5493,11 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
        }
 
        local_irq_save(flags);
-       __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
-       __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
-       __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
-       __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
+       __this_cpu_sub(memcg->stat->count[MEMCG_RSS], nr_anon);
+       __this_cpu_sub(memcg->stat->count[MEMCG_CACHE], nr_file);
+       __this_cpu_sub(memcg->stat->count[MEMCG_RSS_HUGE], nr_huge);
+       __this_cpu_sub(memcg->stat->count[NR_SHMEM], nr_shmem);
+       __this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout);
        __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
        memcg_check_events(memcg, dummy_page);
        local_irq_restore(flags);
@@ -5507,6 +5509,7 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 static void uncharge_list(struct list_head *page_list)
 {
        struct mem_cgroup *memcg = NULL;
+       unsigned long nr_shmem = 0;
        unsigned long nr_anon = 0;
        unsigned long nr_file = 0;
        unsigned long nr_huge = 0;
@@ -5539,9 +5542,9 @@ static void uncharge_list(struct list_head *page_list)
                if (memcg != page->mem_cgroup) {
                        if (memcg) {
                                uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
-                                              nr_huge, nr_kmem, page);
-                               pgpgout = nr_anon = nr_file =
-                                       nr_huge = nr_kmem = 0;
+                                              nr_kmem, nr_huge, nr_shmem, page);
+                               pgpgout = nr_anon = nr_file = nr_kmem = 0;
+                               nr_huge = nr_shmem = 0;
                        }
                        memcg = page->mem_cgroup;
                }
@@ -5555,8 +5558,11 @@ static void uncharge_list(struct list_head *page_list)
                        }
                        if (PageAnon(page))
                                nr_anon += nr_pages;
-                       else
+                       else {
                                nr_file += nr_pages;
+                               if (PageSwapBacked(page))
+                                       nr_shmem += nr_pages;
+                       }
                        pgpgout++;
                } else {
                        nr_kmem += 1 << compound_order(page);
@@ -5568,7 +5574,7 @@ static void uncharge_list(struct list_head *page_list)
 
        if (memcg)
                uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
-                              nr_huge, nr_kmem, page);
+                              nr_kmem, nr_huge, nr_shmem, page);
 }
 
 /**
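uncharge_list() batches the per-type counts and flushes them through uncharge_batch() whenever page->mem_cgroup changes from one page to the next, plus a final flush for the last run. A userspace model of that flush-on-owner-change loop (flush() and the integer owner ids are illustrative):

/* Model of the batching in uncharge_list(): accumulate while the owner
 * stays the same, flush on change, and flush the final run. */
#include <stdio.h>

static void flush(int owner, unsigned long nr)
{
	if (nr)
		printf("uncharge %lu pages from group %d\n", nr, owner);
}

int main(void)
{
	int owners[] = { 1, 1, 2, 2, 2, 1 };	/* page->mem_cgroup per page */
	int cur = -1, i;
	unsigned long nr = 0;

	for (i = 0; i < 6; i++) {
		if (owners[i] != cur) {		/* owner changed: flush run */
			flush(cur, nr);
			cur = owners[i];
			nr = 0;
		}
		nr++;
	}
	flush(cur, nr);				/* final batch */
	return 0;
}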