diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6ba4aab2db0b570a241abb8935f901833c65b862..3191465b0ccf62f130c6ff5b608905dbaf916d35 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2635,6 +2635,7 @@ static struct swap_info_struct *alloc_swap_info(void)
        p->flags = SWP_USED;
        spin_unlock(&swap_lock);
        spin_lock_init(&p->lock);
+       spin_lock_init(&p->cont_lock);
 
        return p;
 }
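The spin_lock_init() added above presumes a new cont_lock field in struct swap_info_struct; that declaration lives in include/linux/swap.h and is not part of this blobdiff. A minimal sketch of the assumed field:

	/* Assumed companion change in include/linux/swap.h (not shown in this diff) */
	struct swap_info_struct {
		/* ... existing fields ... */
		spinlock_t	cont_lock;	/* protect swap count continuation
						 * page list */
	};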
@@ -3052,7 +3053,8 @@ bad_swap:
        p->flags = 0;
        spin_unlock(&swap_lock);
        vfree(swap_map);
-       vfree(cluster_info);
+       kvfree(cluster_info);
+       kvfree(frontswap_map);
        if (swap_file) {
                if (inode && S_ISREG(inode->i_mode)) {
                        inode_unlock(inode);
@@ -3306,6 +3308,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
        head = vmalloc_to_page(si->swap_map + offset);
        offset &= ~PAGE_MASK;
 
+       spin_lock(&si->cont_lock);
        /*
         * Page allocation does not initialize the page's lru field,
         * but it does always reset its private field.
@@ -3325,7 +3328,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
                 * a continuation page, free our allocation and use this one.
                 */
                if (!(count & COUNT_CONTINUED))
-                       goto out;
+                       goto out_unlock_cont;
 
                map = kmap_atomic(list_page) + offset;
                count = *map;
@@ -3336,11 +3339,13 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
                 * free our allocation and use this one.
                 */
                if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
-                       goto out;
+                       goto out_unlock_cont;
        }
 
        list_add_tail(&page->lru, &head->lru);
        page = NULL;                    /* now it's attached, don't free it */
+out_unlock_cont:
+       spin_unlock(&si->cont_lock);
 out:
        unlock_cluster(ci);
        spin_unlock(&si->lock);
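Taken together, the hunks above imply the following lock nesting in add_swap_count_continuation(). This is only a sketch of the ordering visible in the diff (error paths and the list walk omitted), with si, ci and offset used as in the surrounding code:

	spin_lock(&si->lock);
	ci = lock_cluster(si, offset);
	spin_lock(&si->cont_lock);	/* innermost: protects the head->lru continuation list */
	/* ... inspect or extend the continuation page list ... */
	spin_unlock(&si->cont_lock);
	unlock_cluster(ci);		/* released in the same order as the hunk above */
	spin_unlock(&si->lock);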
@@ -3365,6 +3370,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
        struct page *head;
        struct page *page;
        unsigned char *map;
+       bool ret;
 
        head = vmalloc_to_page(si->swap_map + offset);
        if (page_private(head) != SWP_CONTINUED) {
@@ -3372,6 +3378,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
                return false;           /* need to add count continuation */
        }
 
+       spin_lock(&si->cont_lock);
        offset &= ~PAGE_MASK;
        page = list_entry(head->lru.next, struct page, lru);
        map = kmap_atomic(page) + offset;
@@ -3392,8 +3399,10 @@ static bool swap_count_continued(struct swap_info_struct *si,
                if (*map == SWAP_CONT_MAX) {
                        kunmap_atomic(map);
                        page = list_entry(page->lru.next, struct page, lru);
-                       if (page == head)
-                               return false;   /* add count continuation */
+                       if (page == head) {
+                               ret = false;    /* add count continuation */
+                               goto out;
+                       }
                        map = kmap_atomic(page) + offset;
 init_map:              *map = 0;               /* we didn't zero the page */
                }
@@ -3406,7 +3415,7 @@ init_map:         *map = 0;               /* we didn't zero the page */
                        kunmap_atomic(map);
                        page = list_entry(page->lru.prev, struct page, lru);
                }
-               return true;                    /* incremented */
+               ret = true;                     /* incremented */
 
        } else {                                /* decrementing */
                /*
@@ -3432,8 +3441,11 @@ init_map:                *map = 0;               /* we didn't zero the page */
                        kunmap_atomic(map);
                        page = list_entry(page->lru.prev, struct page, lru);
                }
-               return count == COUNT_CONTINUED;
+               ret = count == COUNT_CONTINUED;
        }
+out:
+       spin_unlock(&si->cont_lock);
+       return ret;
 }
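The true/false results now carried in ret feed the retry loop on the caller side. A minimal sketch of that pattern, modeled on swap_duplicate() in this same file (not part of this diff):

	/* Caller-side retry (sketch): when the map byte is full and
	 * swap_count_continued() reports that a continuation page is still
	 * needed, attach one and retry the increment. */
	int err = 0;

	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
		err = add_swap_count_continuation(entry, GFP_ATOMIC);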
 
 /*