From b79d7d28463cd1988fa43b3a8bb5279471d837f7 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Tue, 12 Jul 2016 17:03:16 +0100
Subject: [PATCH 2/2] percpu: fix synchronization between synchronous map
 extension and chunk destruction

For non-atomic allocations, pcpu_alloc() can try to extend the area
map synchronously after dropping pcpu_lock; however, the extension
wasn't synchronized against chunk destruction and the chunk might get
freed while extension is in progress.

This patch fixes the bug by putting most of non-atomic allocations
under pcpu_alloc_mutex to synchronize against pcpu_balance_work which
is responsible for async chunk management including destruction.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-and-tested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Reported-by: Vlastimil Babka <vbabka@suse.cz>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Cc: stable@vger.kernel.org # v3.18+
Fixes: 1a4d76076cda ("percpu: implement asynchronous chunk population")
(cherry picked from commit 6710e594f71ccaad8101bc64321152af7cd9ea28)
CVE-2016-4794
BugLink: https://bugs.launchpad.net/bugs/1581871
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
Acked-by: Christopher Arges <chris.j.arges@canonical.com>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
---
 mm/percpu.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/percpu.c b/mm/percpu.c
index 58b0149..1f376bc 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -160,7 +160,7 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
 static int pcpu_reserved_chunk_limit;
 
 static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
-static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */
+static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
 
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 
@@ -446,6 +446,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
 	unsigned long flags;
 
+	lockdep_assert_held(&pcpu_alloc_mutex);
+
 	new = pcpu_mem_zalloc(new_size);
 	if (!new)
 		return -ENOMEM;
@@ -892,6 +894,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 		return NULL;
 	}
 
+	if (!is_atomic)
+		mutex_lock(&pcpu_alloc_mutex);
+
 	spin_lock_irqsave(&pcpu_lock, flags);
 
 	/* serve reserved allocations from the reserved chunk if available */
@@ -964,12 +969,9 @@ restart:
 	if (is_atomic)
 		goto fail;
 
-	mutex_lock(&pcpu_alloc_mutex);
-
 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
 		chunk = pcpu_create_chunk();
 		if (!chunk) {
-			mutex_unlock(&pcpu_alloc_mutex);
 			err = "failed to allocate new chunk";
 			goto fail;
 		}
@@ -980,7 +982,6 @@ restart:
 		spin_lock_irqsave(&pcpu_lock, flags);
 	}
 
-	mutex_unlock(&pcpu_alloc_mutex);
 	goto restart;
 
 area_found:
@@ -990,8 +991,6 @@ area_found:
 	if (!is_atomic) {
 		int page_start, page_end, rs, re;
 
-		mutex_lock(&pcpu_alloc_mutex);
-
 		page_start = PFN_DOWN(off);
 		page_end = PFN_UP(off + size);
 
@@ -1002,7 +1001,6 @@ area_found:
 
 			spin_lock_irqsave(&pcpu_lock, flags);
 			if (ret) {
-				mutex_unlock(&pcpu_alloc_mutex);
 				pcpu_free_area(chunk, off, &occ_pages);
 				err = "failed to populate";
 				goto fail_unlock;
@@ -1042,6 +1040,8 @@ fail:
 		/* see the flag handling in pcpu_blance_workfn() */
 		pcpu_atomic_alloc_failed = true;
 		pcpu_schedule_balance_work();
+	} else {
+		mutex_unlock(&pcpu_alloc_mutex);
 	}
 	return NULL;
 }
-- 
2.1.4

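For reference, below is a minimal userspace sketch (not kernel code) of the locking rule this patch establishes: the synchronous allocation path takes the same mutex that the background balance/destroy path holds, so a chunk's area map cannot be freed while it is being extended. All names here (chunk, extend_area_map, balance_work, alloc_path, alloc_mutex) are invented stand-ins for struct pcpu_chunk, pcpu_extend_area_map(), pcpu_balance_workfn(), the non-atomic pcpu_alloc() path, and pcpu_alloc_mutex; build with "gcc -pthread".

/* Toy model of the fix: both the allocation path and the balance/destroy
 * path serialize on alloc_mutex, so the map cannot be freed mid-extension. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER; /* stands in for pcpu_alloc_mutex */

struct chunk {				/* stands in for struct pcpu_chunk */
	int *map;			/* allocation map */
	int map_alloc;			/* current map capacity */
};

static struct chunk *live_chunk;	/* single "chunk list" entry */

/* Stand-in for pcpu_extend_area_map(): grow the map to new_alloc entries.
 * Caller must hold alloc_mutex so the chunk cannot be destroyed under us. */
static int extend_area_map(struct chunk *c, int new_alloc)
{
	int *new_map = calloc(new_alloc, sizeof(*new_map));

	if (!new_map)
		return -1;
	memcpy(new_map, c->map, c->map_alloc * sizeof(*c->map));
	free(c->map);
	c->map = new_map;
	c->map_alloc = new_alloc;
	return 0;
}

/* Stand-in for pcpu_balance_workfn(): frees the chunk (chunk destruction),
 * also under alloc_mutex. */
static void *balance_work(void *unused)
{
	pthread_mutex_lock(&alloc_mutex);
	if (live_chunk) {
		free(live_chunk->map);
		free(live_chunk);
		live_chunk = NULL;
	}
	pthread_mutex_unlock(&alloc_mutex);
	return NULL;
}

/* Stand-in for the non-atomic allocation path after the fix: the mutex is
 * taken before the chunk is looked at, so it stays valid during extension. */
static void *alloc_path(void *unused)
{
	pthread_mutex_lock(&alloc_mutex);
	if (live_chunk && extend_area_map(live_chunk, 128) == 0)
		printf("map extended to %d entries\n", live_chunk->map_alloc);
	pthread_mutex_unlock(&alloc_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	live_chunk = calloc(1, sizeof(*live_chunk));
	live_chunk->map_alloc = 64;
	live_chunk->map = calloc(live_chunk->map_alloc, sizeof(int));

	pthread_create(&a, NULL, alloc_path, NULL);
	pthread_create(&b, NULL, balance_work, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	if (live_chunk) {		/* only if balance_work did not free it */
		free(live_chunk->map);
		free(live_chunk);
	}
	return 0;
}

Before the patch, the equivalent of alloc_path() only took the mutex later (around chunk creation and population), so extend_area_map() could run while balance_work() freed the chunk; taking the mutex up front for non-atomic callers closes that window.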