mm/lru: move lock into lru_note_cost
author    Alex Shi <alex.shi@linux.alibaba.com>
          Tue, 15 Dec 2020 22:20:50 +0000 (14:20 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 15 Dec 2020 22:48:03 +0000 (14:48 -0800)
We have to move lru_lock into lru_note_cost, since it cycles up the memcg
tree, in preparation for the future per-lruvec lru_lock replacement.  It's
a bit ugly and may cost a bit more locking, but the benefit from multiple
memcg locking should cover the loss.
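
As illustration only, here is a minimal user-space sketch of the locking
pattern this patch adopts: the node's lru_lock is taken inside the loop
that walks up the memcg hierarchy, once per level, instead of being held
by the caller across the whole walk.  The types below, the parent pointer
standing in for parent_lruvec(), and the pthread spinlock are simplified
stand-ins, not the kernel API; the cost-decay logic is omitted.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's per-node pglist_data and its lru_lock. */
struct pglist_data {
	pthread_spinlock_t lru_lock;
};

/* Stand-in for the kernel's lruvec; parent mimics parent_lruvec(). */
struct lruvec {
	struct pglist_data *pgdat;
	unsigned long file_cost;
	unsigned long anon_cost;
	struct lruvec *parent;
};

static void lru_note_cost(struct lruvec *lruvec, bool file,
			  unsigned int nr_pages)
{
	do {
		/* Lock is taken and dropped once per memcg level: a
		 * bit more lock traffic, but callers no longer need
		 * to hold lru_lock across the whole walk. */
		pthread_spin_lock(&lruvec->pgdat->lru_lock);
		if (file)
			lruvec->file_cost += nr_pages;
		else
			lruvec->anon_cost += nr_pages;
		pthread_spin_unlock(&lruvec->pgdat->lru_lock);
	} while ((lruvec = lruvec->parent));
}

int main(void)
{
	struct pglist_data node;
	struct lruvec root = { &node, 0, 0, NULL };
	struct lruvec child = { &node, 0, 0, &root };

	pthread_spin_init(&node.lru_lock, PTHREAD_PROCESS_PRIVATE);
	lru_note_cost(&child, true, 32);	/* charges child, then root */
	printf("child=%lu root=%lu\n", child.file_cost, root.file_cost);
	return 0;
}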

Link: https://lkml.kernel.org/r/1604566549-62481-11-git-send-email-alex.shi@linux.alibaba.com
Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Alexander Duyck <alexander.duyck@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: "Chen, Rong A" <rong.a.chen@intel.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mika Penttilä <mika.penttila@nextfour.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/swap.c
mm/vmscan.c
mm/workingset.c

index b4ca5b96583871ab969cf20e0d5c27a62fa684ae..c3187d04f9b2ba12a5eaee95b30d6b19881a6fd8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -268,7 +268,9 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
 {
        do {
                unsigned long lrusize;
+               struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
+               spin_lock_irq(&pgdat->lru_lock);
                /* Record cost event */
                if (file)
                        lruvec->file_cost += nr_pages;
@@ -292,6 +294,7 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
                        lruvec->file_cost /= 2;
                        lruvec->anon_cost /= 2;
                }
+               spin_unlock_irq(&pgdat->lru_lock);
        } while ((lruvec = parent_lruvec(lruvec)));
 }
 
index 1c3df77972e8e828f3df3629a1d9111ca92d571d..f69601d39a6dbafa91eaebeec71da8575f66d616 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1971,19 +1971,17 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
 
        spin_lock_irq(&pgdat->lru_lock);
-
        move_pages_to_lru(lruvec, &page_list);
 
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-       lru_note_cost(lruvec, file, stat.nr_pageout);
        item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
        if (!cgroup_reclaim(sc))
                __count_vm_events(item, nr_reclaimed);
        __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
        __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
-
        spin_unlock_irq(&pgdat->lru_lock);
 
+       lru_note_cost(lruvec, file, stat.nr_pageout);
        mem_cgroup_uncharge_list(&page_list);
        free_unref_page_list(&page_list);
 
index 25f75bbe80e046a837136f57bde207f538582535..94b512538d5a199048f7429dd763ba7017c04b81 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -381,9 +381,7 @@ void workingset_refault(struct page *page, void *shadow)
        if (workingset) {
                SetPageWorkingset(page);
                /* XXX: Move to lru_cache_add() when it supports new vs putback */
-               spin_lock_irq(&page_pgdat(page)->lru_lock);
                lru_note_cost_page(page);
-               spin_unlock_irq(&page_pgdat(page)->lru_lock);
                inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
        }
 out: