1 From b79d7d28463cd1988fa43b3a8bb5279471d837f7 Mon Sep 17 00:00:00 2001
2 From: Tejun Heo <tj@kernel.org>
3 Date: Tue, 12 Jul 2016 17:03:16 +0100
4 Subject: [PATCH 2/2] percpu: fix synchronization between synchronous map
5 extension and chunk destruction
7 For non-atomic allocations, pcpu_alloc() can try to extend the area
8 map synchronously after dropping pcpu_lock; however, the extension
9 wasn't synchronized against chunk destruction and the chunk might get
10 freed while extension is in progress.
12 This patch fixes the bug by putting most non-atomic allocations
13 under pcpu_alloc_mutex to synchronize against pcpu_balance_work which
14 is responsible for async chunk management including destruction.
16 Signed-off-by: Tejun Heo <tj@kernel.org>
17 Reported-and-tested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
18 Reported-by: Vlastimil Babka <vbabka@suse.cz>
19 Reported-by: Sasha Levin <sasha.levin@oracle.com>
20 Cc: stable@vger.kernel.org # v3.18+
21 Fixes: 1a4d76076cda ("percpu: implement asynchronous chunk population")
22 (cherry picked from commit 6710e594f71ccaad8101bc64321152af7cd9ea28)
24 BugLink: https://bugs.launchpad.net/bugs/1581871
25 Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
26 Acked-by: Christopher Arges <chris.j.arges@canonical.com>
27 Signed-off-by: Kamal Mostafa <kamal@canonical.com>
29 mm/percpu.c | 16 ++++++++--------
30 1 file changed, 8 insertions(+), 8 deletions(-)
32 diff --git a/mm/percpu.c b/mm/percpu.c
33 index 58b0149..1f376bc 100644
36 @@ -160,7 +160,7 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
37 static int pcpu_reserved_chunk_limit;
39 static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
40 -static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
41 +static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
43 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
45 @@ -446,6 +446,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
46 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
49 + lockdep_assert_held(&pcpu_alloc_mutex);
51 new = pcpu_mem_zalloc(new_size);
54 @@ -892,6 +894,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
59 + mutex_lock(&pcpu_alloc_mutex);
61 spin_lock_irqsave(&pcpu_lock, flags);
63 /* serve reserved allocations from the reserved chunk if available */
64 @@ -964,12 +969,9 @@ restart:
68 - mutex_lock(&pcpu_alloc_mutex);
70 if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
71 chunk = pcpu_create_chunk();
73 - mutex_unlock(&pcpu_alloc_mutex);
74 err = "failed to allocate new chunk";
77 @@ -980,7 +982,6 @@ restart:
78 spin_lock_irqsave(&pcpu_lock, flags);
81 - mutex_unlock(&pcpu_alloc_mutex);
85 @@ -990,8 +991,6 @@ area_found:
87 int page_start, page_end, rs, re;
89 - mutex_lock(&pcpu_alloc_mutex);
91 page_start = PFN_DOWN(off);
92 page_end = PFN_UP(off + size);
94 @@ -1002,7 +1001,6 @@ area_found:
96 spin_lock_irqsave(&pcpu_lock, flags);
98 - mutex_unlock(&pcpu_alloc_mutex);
99 pcpu_free_area(chunk, off, &occ_pages);
100 err = "failed to populate";
102 @@ -1042,6 +1040,8 @@ fail:
103 /* see the flag handling in pcpu_blance_workfn() */
104 pcpu_atomic_alloc_failed = true;
105 pcpu_schedule_balance_work();
107 + mutex_unlock(&pcpu_alloc_mutex);