diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 2b2b9aae8a3c63dfc3c8a1b0551ec8c415d69567..490e5f3ae614ad82f21aa36a4233547f6d953435 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -293,11 +293,7 @@ struct zspage {
 };
 
 struct mapping_area {
-#ifdef CONFIG_PGTABLE_MAPPING
-       struct vm_struct *vm; /* vm area for mapping object that span pages */
-#else
        char *vm_buf; /* copy buffer for objects that span pages */
-#endif
        char *vm_addr; /* address of kmap_atomic()'ed pages */
        enum zs_mapmode vm_mm; /* mapping mode */
 };
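
With the page-table mapping variant gone, vm_addr is all that remains for direct access: an object that fits inside one page is still kmap_atomic()'ed in place, and only objects spanning a page boundary go through vm_buf. A sketch of that fast path, abbreviated from zs_map_object() in this file (the wrapper name is invented for illustration; migration locking and the put_cpu_var() pairing are omitted):

/* Hypothetical illustration of the single-page fast path. */
static void *map_single_page_object(struct page *page, int off,
				    struct size_class *class,
				    enum zs_mapmode mm)
{
	struct mapping_area *area;

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* object is contained entirely within one page */
		area->vm_addr = kmap_atomic(page);
		return area->vm_addr + off;
	}
	/* spanning object: fall back to the vm_buf copy path */
	return NULL;
}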
@@ -1113,46 +1109,6 @@ static struct zspage *find_get_zspage(struct size_class *class)
        return zspage;
 }
 
-#ifdef CONFIG_PGTABLE_MAPPING
-static inline int __zs_cpu_up(struct mapping_area *area)
-{
-       /*
-        * Make sure we don't leak memory if a cpu UP notification
-        * and zs_init() race and both call zs_cpu_up() on the same cpu
-        */
-       if (area->vm)
-               return 0;
-       area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
-       if (!area->vm)
-               return -ENOMEM;
-       return 0;
-}
-
-static inline void __zs_cpu_down(struct mapping_area *area)
-{
-       if (area->vm)
-               free_vm_area(area->vm);
-       area->vm = NULL;
-}
-
-static inline void *__zs_map_object(struct mapping_area *area,
-                               struct page *pages[2], int off, int size)
-{
-       BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
-       area->vm_addr = area->vm->addr;
-       return area->vm_addr + off;
-}
-
-static inline void __zs_unmap_object(struct mapping_area *area,
-                               struct page *pages[2], int off, int size)
-{
-       unsigned long addr = (unsigned long)area->vm_addr;
-
-       unmap_kernel_range(addr, PAGE_SIZE * 2);
-}
-
-#else /* CONFIG_PGTABLE_MAPPING */
-
 static inline int __zs_cpu_up(struct mapping_area *area)
 {
        /*
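
The hunk above truncates the surviving branch (formerly the #else side). For orientation, the retained copy-based pair looks roughly like this; a paraphrase assuming the usual zsmalloc definitions (ZS_MAX_ALLOC_SIZE, the per-CPU zs_map_area), with the ZS_MM_WO shortcut omitted, not the verbatim remainder:

/* Retained setup path: one kmalloc()'ed bounce buffer per CPU,
 * sized for the largest object a zspage can hold. */
static inline int __zs_cpu_up(struct mapping_area *area)
{
	if (area->vm_buf)
		return 0;	/* already set up; tolerates the zs_init() race */
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

/* Retained map path for objects spanning two pages: rather than
 * remapping both pages contiguously, copy the two fragments into
 * vm_buf and hand the buffer out. */
static void *__zs_map_object(struct mapping_area *area,
			     struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);

	return buf;
}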
@@ -1233,8 +1189,6 @@ out:
        pagefault_enable();
 }
 
-#endif /* CONFIG_PGTABLE_MAPPING */
-
 static int zs_cpu_prepare(unsigned int cpu)
 {
        struct mapping_area *area;
@@ -1881,10 +1835,11 @@ static inline void zs_pool_dec_isolated(struct zs_pool *pool)
        VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
        atomic_long_dec(&pool->isolated_pages);
        /*
-        * There's no possibility of racing, since wait_for_isolated_drain()
-        * checks the isolated count under &class->lock after enqueuing
-        * on migration_wait.
+        * Checking pool->destroying must happen after atomic_long_dec()
+        * for pool->isolated_pages above. Paired with the smp_mb() in
+        * zs_unregister_migration().
         */
+       smp_mb__after_atomic();
        if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
                wake_up_all(&pool->migration_wait);
 }
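
The barrier closes a window in which the load of pool->destroying could be ordered before the decrement, so the last isolated page would neither see destroying set nor issue the wake-up, leaving zs_destroy_pool() waiting forever. The pairing, roughly (writer side abbreviated from zs_unregister_migration() / wait_for_isolated_drain() in this file):

/* Writer: zs_unregister_migration() */
pool->destroying = true;
smp_mb();		/* publish destroying before sampling isolated_pages */
wait_event(pool->migration_wait,
	   atomic_long_read(&pool->isolated_pages) == 0);

/* Reader: zs_pool_dec_isolated(), as patched above */
atomic_long_dec(&pool->isolated_pages);
smp_mb__after_atomic();	/* order the dec before loading destroying */
if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
	wake_up_all(&pool->migration_wait);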
@@ -2069,6 +2024,11 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
                zs_pool_dec_isolated(pool);
        }
 
+       if (page_zone(newpage) != page_zone(page)) {
+               dec_zone_page_state(page, NR_ZSPAGES);
+               inc_zone_page_state(newpage, NR_ZSPAGES);
+       }
+
        reset_page(page);
        put_page(page);
        page = newpage;
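
NR_ZSPAGES is a per-zone counter charged once per physical page when a zspage is assembled, so a migration target sitting in a different zone has to take the charge with it or /proc/zoneinfo slowly drifts. Where the charge originates (sketch, abbreviated from alloc_zspage() in this file; the error unwind is elided):

/* Each page backing a zspage charges NR_ZSPAGES to its own zone;
 * __free_zspage() reverses this per page on teardown. */
for (i = 0; i < class->pages_per_zspage; i++) {
	struct page *page = alloc_page(gfp);

	if (!page)
		goto unwind;	/* roll back pages[0..i-1] (omitted) */
	inc_zone_page_state(page, NR_ZSPAGES);
	pages[i] = page;
}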
@@ -2257,11 +2217,13 @@ static unsigned long zs_can_compact(struct size_class *class)
        return obj_wasted * class->pages_per_zspage;
 }
 
-static void __zs_compact(struct zs_pool *pool, struct size_class *class)
+static unsigned long __zs_compact(struct zs_pool *pool,
+                                 struct size_class *class)
 {
        struct zs_compact_control cc;
        struct zspage *src_zspage;
        struct zspage *dst_zspage = NULL;
+       unsigned long pages_freed = 0;
 
        spin_lock(&class->lock);
        while ((src_zspage = isolate_zspage(class, true))) {
@@ -2291,7 +2253,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
                putback_zspage(class, dst_zspage);
                if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
                        free_zspage(pool, class, src_zspage);
-                       pool->stats.pages_compacted += class->pages_per_zspage;
+                       pages_freed += class->pages_per_zspage;
                }
                spin_unlock(&class->lock);
                cond_resched();
@@ -2302,12 +2264,15 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
                putback_zspage(class, src_zspage);
 
        spin_unlock(&class->lock);
+
+       return pages_freed;
 }
 
 unsigned long zs_compact(struct zs_pool *pool)
 {
        int i;
        struct size_class *class;
+       unsigned long pages_freed = 0;
 
        for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
                class = pool->size_class[i];
@@ -2315,10 +2280,11 @@ unsigned long zs_compact(struct zs_pool *pool)
                        continue;
                if (class->index != i)
                        continue;
-               __zs_compact(pool, class);
+               pages_freed += __zs_compact(pool, class);
        }
+       atomic_long_add(pages_freed, &pool->stats.pages_compacted);
 
-       return pool->stats.pages_compacted;
+       return pages_freed;
 }
 EXPORT_SYMBOL_GPL(zs_compact);
 
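
Two things change together here: zs_compact() now reports only what this call freed, and the running total is published with atomic_long_add(), which implies pages_compacted became an atomic_long_t in include/linux/zsmalloc.h (a hunk outside this file). A consumer wanting the cumulative figure snapshots it along these lines (a sketch modeled on zram's mm_stat handling, under that same assumption):

struct zs_pool_stats pool_stats;
unsigned long total_compacted;

/* zs_pool_stats() memcpy()s the stats block out of the pool... */
zs_pool_stats(pool, &pool_stats);
/* ...and the cumulative counter is now read atomically */
total_compacted = atomic_long_read(&pool_stats.pages_compacted);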
@@ -2335,13 +2301,12 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
        struct zs_pool *pool = container_of(shrinker, struct zs_pool,
                        shrinker);
 
-       pages_freed = pool->stats.pages_compacted;
        /*
         * Compact classes and calculate compaction delta.
         * Can run concurrently with a manually triggered
         * (by user) compaction.
         */
-       pages_freed = zs_compact(pool) - pages_freed;
+       pages_freed = zs_compact(pool);
 
        return pages_freed ? pages_freed : SHRINK_STOP;
 }
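
The old read-then-subtract pattern miscounts whenever another compaction runs between the two reads of the shared counter; returning the per-call tally from zs_compact() removes the window entirely. An interleaving that shows the bug (a sketch of the pre-patch logic):

/*
 *   CPU0: zs_shrinker_scan()                CPU1: manual zs_compact()
 *   -------------------------               -------------------------
 *   pages_freed = stats.pages_compacted;    (reads 100)
 *                                           frees 50 pages;
 *                                           stats.pages_compacted = 150
 *   zs_compact() frees nothing
 *   return 150 - 100;  -> reports 50 pages this shrinker never freed,
 *                         misleading the shrinker core's accounting
 */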