swap_info: note SWAP_MAP_SHMEM
author    Hugh Dickins <hugh.dickins@tiscali.co.uk>
          Tue, 15 Dec 2009 01:58:47 +0000 (17:58 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 15 Dec 2009 16:53:16 +0000 (08:53 -0800)
While we're fiddling with the swap_map values, let's assign a particular
value to shmem/tmpfs swap pages: their swap counts are never incremented,
and it helps swapoff's try_to_unuse() a little if it can immediately
distinguish those pages from process pages.

Since we've no use for SWAP_MAP_BAD | COUNT_CONTINUED,
we might as well use that 0xbf value for SWAP_MAP_SHMEM.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
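
For readers tracing the arithmetic: a minimal sketch of how the 0xbf value
decomposes, using the swap_map #defines from include/linux/swap.h of this
era (SWAP_MAP_BAD's 0x3f value is implied by the message above; the others
appear in the first hunk below). An illustration only, not part of the patch:

        /* Illustration only: why 0xbf is free for reuse.
         * SWAP_MAP_BAD (0x3f) marks an unusable slot, and COUNT_CONTINUED
         * (0x80) says "see the continuation page for the full count"; a
         * bad slot never grows a continuation, so the combination is unused.
         */
        #define SWAP_MAP_BAD    0x3f
        #define COUNT_CONTINUED 0x80
        #define SWAP_MAP_SHMEM  (SWAP_MAP_BAD | COUNT_CONTINUED)   /* 0xbf */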
include/linux/swap.h
mm/shmem.c
mm/swapfile.c

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 389e7bd92cca312c3c3a42eb5ab7e149c52fc91b..ac43d87b89b092a748eae5c04f29929c676f4a0f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -157,6 +157,7 @@ enum {
 #define SWAP_HAS_CACHE 0x40    /* Flag page is cached, in first swap_map */
 #define SWAP_CONT_MAX  0x7f    /* Max count, in each swap_map continuation */
 #define COUNT_CONTINUED        0x80    /* See swap_map continuation for full count */
+#define SWAP_MAP_SHMEM 0xbf    /* Owned by shmem/tmpfs, in first swap_map */
 
 /*
  * The in-memory structure used to track swap areas.
@@ -316,6 +317,7 @@ extern swp_entry_t get_swap_page(void);
 extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
+extern void swap_shmem_alloc(swp_entry_t);
 extern int swap_duplicate(swp_entry_t);
 extern int swapcache_prepare(swp_entry_t);
 extern void swap_free(swp_entry_t);
@@ -394,6 +396,10 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
        return 0;
 }
 
+static inline void swap_shmem_alloc(swp_entry_t swp)
+{
+}
+
 static inline int swap_duplicate(swp_entry_t swp)
 {
        return 0;
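
The two declarations above follow the usual CONFIG_SWAP stub pattern; a
condensed sketch (assembled from the two swap.h hunks, not verbatim) of why
the empty inline exists:

        /* mm/shmem.c calls swap_shmem_alloc() unconditionally, so with
         * CONFIG_SWAP=n the empty static inline lets the call compile
         * away instead of requiring #ifdefs at every call site.
         */
        #ifdef CONFIG_SWAP
        extern void swap_shmem_alloc(swp_entry_t);
        #else
        static inline void swap_shmem_alloc(swp_entry_t swp)
        {
        }
        #endif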
diff --git a/mm/shmem.c b/mm/shmem.c
index 356dd99566ecb671cd324ba3dddcee10441312f0..4fb41c83daca8d7c1096c380cc0f579f04908e1a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1017,7 +1017,14 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
                        goto out;
        }
        mutex_unlock(&shmem_swaplist_mutex);
-out:   return found;   /* 0 or 1 or -ENOMEM */
+       /*
+        * Can some race bring us here?  We've been holding page lock,
+        * so I think not; but would rather try again later than BUG()
+        */
+       unlock_page(page);
+       page_cache_release(page);
+out:
+       return (found < 0) ? found : 0;
 }
 
 /*
@@ -1080,7 +1087,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                else
                        inode = NULL;
                spin_unlock(&info->lock);
-               swap_duplicate(swap);
+               swap_shmem_alloc(swap);
                BUG_ON(page_mapped(page));
                page_cache_release(page);       /* pagecache ref */
                swap_writepage(page, wbc);
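
Note that the shmem_unuse() hunk above also tightens its contract: it now
unlocks and releases the page itself on every path and returns 0 or a
negative errno, never 1 for "found". A hedged sketch of the caller-side
idiom this enables (it matches the try_to_unuse() hunk below):

        /* Sketch of the new calling convention (condensed, not verbatim):
         * after shmem_unuse() returns, the caller must not touch the page.
         */
        retval = shmem_unuse(entry, page);
        /* page has already been unlocked and released */
        if (retval < 0)
                break;          /* propagate e.g. -ENOMEM */
        continue;               /* entry handled, found or not */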
diff --git a/mm/swapfile.c b/mm/swapfile.c
index cc5e7ebf2d2c5105328688045080bcc6ce5a983f..58bec66001673dbd6dd58dd4d7bf8fa1fc53b135 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -548,6 +548,12 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
        if (usage == SWAP_HAS_CACHE) {
                VM_BUG_ON(!has_cache);
                has_cache = 0;
+       } else if (count == SWAP_MAP_SHMEM) {
+               /*
+                * Or we could insist on shmem.c using a special
+                * swap_shmem_free() and free_shmem_swap_and_cache()...
+                */
+               count = 0;
        } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
                if (count == COUNT_CONTINUED) {
                        if (swap_count_continued(p, offset, count))
@@ -1031,7 +1037,6 @@ static int try_to_unuse(unsigned int type)
        swp_entry_t entry;
        unsigned int i = 0;
        int retval = 0;
-       int shmem;
 
        /*
         * When searching mms for an entry, a good strategy is to
@@ -1107,17 +1112,18 @@ static int try_to_unuse(unsigned int type)
 
                /*
                 * Remove all references to entry.
-                * Whenever we reach init_mm, there's no address space
-                * to search, but use it as a reminder to search shmem.
                 */
-               shmem = 0;
                swcount = *swap_map;
-               if (swap_count(swcount)) {
-                       if (start_mm == &init_mm)
-                               shmem = shmem_unuse(entry, page);
-                       else
-                               retval = unuse_mm(start_mm, entry, page);
+               if (swap_count(swcount) == SWAP_MAP_SHMEM) {
+                       retval = shmem_unuse(entry, page);
+                       /* page has already been unlocked and released */
+                       if (retval < 0)
+                               break;
+                       continue;
                }
+               if (swap_count(swcount) && start_mm != &init_mm)
+                       retval = unuse_mm(start_mm, entry, page);
+
                if (swap_count(*swap_map)) {
                        int set_start_mm = (*swap_map >= swcount);
                        struct list_head *p = &start_mm->mmlist;
@@ -1128,7 +1134,7 @@ static int try_to_unuse(unsigned int type)
                        atomic_inc(&new_start_mm->mm_users);
                        atomic_inc(&prev_mm->mm_users);
                        spin_lock(&mmlist_lock);
-                       while (swap_count(*swap_map) && !retval && !shmem &&
+                       while (swap_count(*swap_map) && !retval &&
                                        (p = p->next) != &start_mm->mmlist) {
                                mm = list_entry(p, struct mm_struct, mmlist);
                                if (!atomic_inc_not_zero(&mm->mm_users))
@@ -1142,10 +1148,9 @@ static int try_to_unuse(unsigned int type)
                                swcount = *swap_map;
                                if (!swap_count(swcount)) /* any usage ? */
                                        ;
-                               else if (mm == &init_mm) {
+                               else if (mm == &init_mm)
                                        set_start_mm = 1;
-                                       shmem = shmem_unuse(entry, page);
-                               } else
+                               else
                                        retval = unuse_mm(mm, entry, page);
 
                                if (set_start_mm && *swap_map < swcount) {
@@ -1161,13 +1166,6 @@ static int try_to_unuse(unsigned int type)
                        mmput(start_mm);
                        start_mm = new_start_mm;
                }
-               if (shmem) {
-                       /* page has already been unlocked and released */
-                       if (shmem > 0)
-                               continue;
-                       retval = shmem;
-                       break;
-               }
                if (retval) {
                        unlock_page(page);
                        page_cache_release(page);
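
One subtlety in the try_to_unuse() rework above deserves a note: the
equality test on the swap_map value cannot collide with a genuine count. A
hedged reasoning sketch, with a hypothetical helper name:

        /* swap_count() in mm/swapfile.c of this era masks off only
         * SWAP_HAS_CACHE (0x40), which 0xbf does not contain; and a real
         * continued count in the first swap_map byte is COUNT_CONTINUED
         * (0x80) plus at most SWAP_MAP_MAX (0x3e), so 0x80 | 0x3f can
         * never arise from ordinary counting.
         */
        static inline int entry_owned_by_shmem(unsigned char ent)  /* hypothetical */
        {
                return (ent & ~SWAP_HAS_CACHE) == SWAP_MAP_SHMEM;
        }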
@@ -2126,6 +2124,15 @@ bad_file:
        goto out;
 }
 
+/*
+ * Help swapoff by noting that swap entry belongs to shmem/tmpfs
+ * (in which case its reference count is never incremented).
+ */
+void swap_shmem_alloc(swp_entry_t entry)
+{
+       __swap_duplicate(entry, SWAP_MAP_SHMEM);
+}
+
 /*
  * increase reference count of swap entry by 1.
  */
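
Putting the pieces together, a simplified end-to-end sketch of the first
swap_map byte for a shmem page across this commit's paths (the 0xff
intermediate assumes the page is still in swap cache during writeback;
helper names and ordering are abbreviated):

        /* Lifecycle sketch (simplified, not verbatim kernel code):
         *
         *   shmem_writepage()
         *     get_swap_page()     ->  swap_map = SWAP_HAS_CACHE          (0x40)
         *     swap_shmem_alloc()  ->  swap_map = SWAP_MAP_SHMEM
         *                                        | SWAP_HAS_CACHE        (0xff)
         *   ...writeback completes, page leaves swap cache...
         *     swapcache_free()    ->  swap_map = SWAP_MAP_SHMEM          (0xbf)
         *
         *   swap-in, or swapoff via try_to_unuse()/shmem_unuse():
         *     swap_free()         ->  swap_entry_free() sees
         *                             count == SWAP_MAP_SHMEM and resets
         *                             it straight to 0: shmem never
         *                             duplicates its single owner reference.
         */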