#include <linux/eventfd.h>
#include <linux/mmzone.h>
#include <linux/writeback.h>
+#include <linux/page-flags.h>
struct mem_cgroup;
struct page;
/**
 * mem_cgroup_update_page_stat - update page state statistics
 * @page: the page
 * @idx: page state item to account
* @val: number of pages (positive or negative)
*
- * Callers must use lock_page_memcg() to prevent double accounting
- * when the page is concurrently being moved to another memcg:
+ * The @page must be locked or the caller must use lock_page_memcg()
+ * to prevent double accounting when the page is concurrently being
+ * moved to another memcg:
*
- * lock_page_memcg(page);
+ * lock_page(page) or lock_page_memcg(page)
* if (TestClearPageState(page))
* mem_cgroup_update_page_stat(page, state, -1);
- * unlock_page_memcg(page);
+ * unlock_page(page) or unlock_page_memcg(page)
*/
static inline void mem_cgroup_update_page_stat(struct page *page,
enum mem_cgroup_stat_index idx, int val)
{
- VM_BUG_ON(!rcu_read_lock_held());
+ VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
if (page->mem_cgroup)
		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
}
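
For context, the reason either lock is sufficient: the only writer that
rebinds page->mem_cgroup is the charge-moving path, and in this series it
takes both the page lock and the per-memcg move_lock before touching the
binding. A minimal editorial sketch of that writer side, assuming
mem_cgroup_move_account()'s trylock_page()/move_lock ordering (the actual
stat transfer is omitted; this sketch is not part of the patch):

	static int move_account_sketch(struct page *page,
				       struct mem_cgroup *from,
				       struct mem_cgroup *to)
	{
		unsigned long flags;

		/* Excludes stat updaters relying on the page lock alone. */
		if (!trylock_page(page))
			return -EBUSY;

		/* Excludes stat updaters relying on lock_page_memcg(). */
		spin_lock_irqsave(&from->move_lock, flags);
		page->mem_cgroup = to;
		spin_unlock_irqrestore(&from->move_lock, flags);

		unlock_page(page);
		return 0;
	}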
/*
* Delete a page from the page cache and free it. Caller has to make
* sure the page is locked and that nobody else uses it - or that usage
- * is safe. The caller must hold the mapping's tree_lock and
- * lock_page_memcg().
+ * is safe. The caller must hold the mapping's tree_lock.
*/
void __delete_from_page_cache(struct page *page, void *shadow)
{
freepage = mapping->a_ops->freepage;
- lock_page_memcg(page);
spin_lock_irqsave(&mapping->tree_lock, flags);
__delete_from_page_cache(page, NULL);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
- unlock_page_memcg(page);
if (freepage)
freepage(page);
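
With the lock_page_memcg() pair gone, the page lock is what provides the
exclusion here. An abridged sketch of the resulting shape of
delete_from_page_cache(), assuming its usual PageLocked assertion on entry
(the final reference drop and error handling are omitted):

	void delete_from_page_cache(struct page *page)
	{
		struct address_space *mapping = page->mapping;
		void (*freepage)(struct page *);
		unsigned long flags;

		/* The page lock stabilizes page->mem_cgroup for the
		 * dirty accounting inside __delete_from_page_cache(). */
		BUG_ON(!PageLocked(page));

		freepage = mapping->a_ops->freepage;
		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(page, NULL);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		if (freepage)
			freepage(page);
	}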
new->mapping = mapping;
new->index = offset;
- lock_page_memcg(old);
spin_lock_irqsave(&mapping->tree_lock, flags);
__delete_from_page_cache(old, NULL);
error = radix_tree_insert(&mapping->page_tree, offset, new);
if (PageSwapBacked(new))
__inc_zone_page_state(new, NR_SHMEM);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
- unlock_page_memcg(old);
mem_cgroup_migrate(old, new);
radix_tree_preload_end();
if (freepage)
* always locked coming in here, so we get the desired
* exclusion.
*/
- lock_page_memcg(page);
wb = unlocked_inode_to_wb_begin(inode, &locked);
if (TestClearPageDirty(page)) {
mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
ret = 1;
}
unlocked_inode_to_wb_end(inode, locked);
- unlock_page_memcg(page);
return ret;
}
return TestClearPageDirty(page);
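
By contrast, writeback completion can run without the page lock and so must
keep taking the memcg lock itself. A sketch mirroring
test_clear_page_writeback()'s locking in this series (the zone and
writeback-domain accounting is omitted):

	int test_clear_writeback_sketch(struct page *page)
	{
		int ret;

		/* No page lock held here; pin page->mem_cgroup explicitly. */
		lock_page_memcg(page);
		ret = TestClearPageWriteback(page);
		if (ret)
			mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
		unlock_page_memcg(page);

		return ret;
	}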
if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
return 0;
- lock_page_memcg(page);
spin_lock_irqsave(&mapping->tree_lock, flags);
if (PageDirty(page))
goto failed;
BUG_ON(page_has_private(page));
__delete_from_page_cache(page, NULL);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
- unlock_page_memcg(page);
if (mapping->a_ops->freepage)
mapping->a_ops->freepage(page);
return 1;
failed:
spin_unlock_irqrestore(&mapping->tree_lock, flags);
- unlock_page_memcg(page);
return 0;
}
BUG_ON(!PageLocked(page));
BUG_ON(mapping != page_mapping(page));
- lock_page_memcg(page);
spin_lock_irqsave(&mapping->tree_lock, flags);
/*
 * The non-racy check for a busy page.
mem_cgroup_swapout(page, swap);
__delete_from_swap_cache(page);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
- unlock_page_memcg(page);
swapcache_free(swap);
} else {
void (*freepage)(struct page *);
shadow = workingset_eviction(mapping, page);
__delete_from_page_cache(page, shadow);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
- unlock_page_memcg(page);
if (freepage != NULL)
freepage(page);
cannot_free:
spin_unlock_irqrestore(&mapping->tree_lock, flags);
- unlock_page_memcg(page);
return 0;
}
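
Reclaim is covered the same way: shrink_page_list() only calls
__remove_mapping() on pages it has trylocked, so the page lock spans both
deletion branches above. An editorial sketch of that caller-side contract
(in the real code a successfully removed page is freed with its lock bit
cleared non-atomically rather than unlocked):

	static int reclaim_one_page_sketch(struct address_space *mapping,
					   struct page *page)
	{
		if (!trylock_page(page))
			return 0;	/* "keep": page not stabilized */

		if (!__remove_mapping(mapping, page, true)) {
			unlock_page(page);	/* "keep_locked" path */
			return 0;
		}

		/* No references remain; safe to free the page. */
		return 1;
	}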