UBUNTU: Start new release
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 89cec42d19ffa8da5ad1e3c8e64ff4df1b3e562b..7c28df36f50ff009963ff1f04cf0be848dd0fa13 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1254,85 +1254,6 @@ static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
        return 0;
 }
 
-#ifdef CONFIG_PAGE_OWNER
-static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
-                                                       pg_data_t *pgdat,
-                                                       struct zone *zone)
-{
-       struct page *page;
-       struct page_ext *page_ext;
-       unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
-       unsigned long end_pfn = pfn + zone->spanned_pages;
-       unsigned long count[MIGRATE_TYPES] = { 0, };
-       int pageblock_mt, page_mt;
-       int i;
-
-       /* Scan block by block. First and last block may be incomplete */
-       pfn = zone->zone_start_pfn;
-
-       /*
-        * Walk the zone in pageblock_nr_pages steps. If a page block spans
-        * a zone boundary, it will be double counted between zones. This does
-        * not matter as the mixed block count will still be correct
-        */
-       for (; pfn < end_pfn; ) {
-               if (!pfn_valid(pfn)) {
-                       pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
-                       continue;
-               }
-
-               block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-               block_end_pfn = min(block_end_pfn, end_pfn);
-
-               page = pfn_to_page(pfn);
-               pageblock_mt = get_pageblock_migratetype(page);
-
-               for (; pfn < block_end_pfn; pfn++) {
-                       if (!pfn_valid_within(pfn))
-                               continue;
-
-                       page = pfn_to_page(pfn);
-
-                       if (page_zone(page) != zone)
-                               continue;
-
-                       if (PageBuddy(page)) {
-                               pfn += (1UL << page_order(page)) - 1;
-                               continue;
-                       }
-
-                       if (PageReserved(page))
-                               continue;
-
-                       page_ext = lookup_page_ext(page);
-                       if (unlikely(!page_ext))
-                               continue;
-
-                       if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
-                               continue;
-
-                       page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
-                       if (pageblock_mt != page_mt) {
-                               if (is_migrate_cma(pageblock_mt))
-                                       count[MIGRATE_MOVABLE]++;
-                               else
-                                       count[pageblock_mt]++;
-
-                               pfn = block_end_pfn;
-                               break;
-                       }
-                       pfn += (1UL << page_ext->order) - 1;
-               }
-       }
-
-       /* Print counts */
-       seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
-       for (i = 0; i < MIGRATE_TYPES; i++)
-               seq_printf(m, "%12lu ", count[i]);
-       seq_putc(m, '\n');
-}
-#endif /* CONFIG_PAGE_OWNER */
-
 /*
  * Print out the number of pageblocks for each migratetype that contain pages
  * of other types. This gives an indication of how well fallbacks are being
@@ -1592,7 +1513,10 @@ static int vmstat_show(struct seq_file *m, void *arg)
 {
        unsigned long *l = arg;
        unsigned long off = l - (unsigned long *)m->private;
-       seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
+
+       seq_puts(m, vmstat_text[off]);
+       seq_put_decimal_ull(m, " ", *l);
+       seq_putc(m, '\n');
        return 0;
 }
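
Note on the vmstat_show() hunk above: the single seq_printf() is replaced by seq_puts() plus seq_put_decimal_ull(); the delimiter string passed to seq_put_decimal_ull() is written before the number, so the line emitted to /proc/vmstat is byte-for-byte the same while the format-string parsing of seq_printf() is avoided on this frequently read file. A minimal illustration of that equivalence, using a hypothetical helper name and an invented value, not part of this patch:

	#include <linux/seq_file.h>

	/*
	 * Illustration only: for name == "nr_free_pages" and val == 123456
	 * this produces the same output as the old
	 * seq_printf(m, "%s %lu\n", name, val), namely:
	 *
	 *	nr_free_pages 123456
	 */
	static void vmstat_show_line(struct seq_file *m, const char *name,
				     unsigned long val)
	{
		seq_puts(m, name);			/* counter name */
		seq_put_decimal_ull(m, " ", val);	/* " " is written before val */
		seq_putc(m, '\n');
	}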
 
@@ -1794,66 +1718,68 @@ static void __init start_shepherd_timer(void)
                round_jiffies_relative(sysctl_stat_interval));
 }
 
-static void vmstat_cpu_dead(int node)
+static void __init init_cpu_node_state(void)
 {
-       int cpu;
+       int node;
 
-       get_online_cpus();
-       for_each_online_cpu(cpu)
-               if (cpu_to_node(cpu) == node)
-                       goto end;
+       for_each_online_node(node) {
+               if (cpumask_weight(cpumask_of_node(node)) > 0)
+                       node_set_state(node, N_CPU);
+       }
+}
 
-       node_clear_state(node, N_CPU);
-end:
-       put_online_cpus();
+static int vmstat_cpu_online(unsigned int cpu)
+{
+       refresh_zone_stat_thresholds();
+       node_set_state(cpu_to_node(cpu), N_CPU);
+       return 0;
 }
 
-/*
- * Use the cpu notifier to insure that the thresholds are recalculated
- * when necessary.
- */
-static int vmstat_cpuup_callback(struct notifier_block *nfb,
-               unsigned long action,
-               void *hcpu)
-{
-       long cpu = (long)hcpu;
-
-       switch (action) {
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               refresh_zone_stat_thresholds();
-               node_set_state(cpu_to_node(cpu), N_CPU);
-               break;
-       case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
-               cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
-               break;
-       case CPU_DOWN_FAILED:
-       case CPU_DOWN_FAILED_FROZEN:
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               refresh_zone_stat_thresholds();
-               vmstat_cpu_dead(cpu_to_node(cpu));
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
+static int vmstat_cpu_down_prep(unsigned int cpu)
+{
+       cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
+       return 0;
+}
+
+static int vmstat_cpu_dead(unsigned int cpu)
+{
+       const struct cpumask *node_cpus;
+       int node;
+
+       node = cpu_to_node(cpu);
+
+       refresh_zone_stat_thresholds();
+       node_cpus = cpumask_of_node(node);
+       if (cpumask_weight(node_cpus) > 0)
+               return 0;
+
+       node_clear_state(node, N_CPU);
+       return 0;
 }
 
-static struct notifier_block vmstat_notifier =
-       { &vmstat_cpuup_callback, NULL, 0 };
 #endif
 
 static int __init setup_vmstat(void)
 {
 #ifdef CONFIG_SMP
-       cpu_notifier_register_begin();
-       __register_cpu_notifier(&vmstat_notifier);
+       int ret;
+
+       ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
+                                       NULL, vmstat_cpu_dead);
+       if (ret < 0)
+               pr_err("vmstat: failed to register 'dead' hotplug state\n");
+
+       ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
+                                       vmstat_cpu_online,
+                                       vmstat_cpu_down_prep);
+       if (ret < 0)
+               pr_err("vmstat: failed to register 'online' hotplug state\n");
+
+       get_online_cpus();
+       init_cpu_node_state();
+       put_online_cpus();
 
        start_shepherd_timer();
-       cpu_notifier_register_done();
 #endif
 #ifdef CONFIG_PROC_FS
        proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);