From 7c5d42f31bf68647dd00ac2fef9057d113e8072d Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Date: Sat, 9 Sep 2017 00:56:03 +0300
Subject: [PATCH 050/242] mm, x86/mm: Fix performance regression in
 get_user_pages_fast()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The 0-day test bot found a performance regression that was tracked down to
switching x86 to the generic get_user_pages_fast() implementation:

  http://lkml.kernel.org/r/20170710024020.GA26389@yexl-desktop

The regression was caused by the fact that we now use local_irq_save() +
local_irq_restore() in get_user_pages_fast() to disable interrupts.
In the x86 implementation, local_irq_disable() + local_irq_enable() were used.

The fix is to make get_user_pages_fast() use local_irq_disable(),
leaving local_irq_save() for __get_user_pages_fast(), which can be called
with interrupts already disabled.
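
As a rough illustration (not part of the patch): on x86, local_irq_save()
reads EFLAGS via pushf and local_irq_restore() rewrites it via popf, which
is noticeably more expensive than the plain cli/sti pair behind
local_irq_disable()/local_irq_enable():

	unsigned long flags;

	/* Pattern used by the generic code before this fix: */
	local_irq_save(flags);		/* pushf; pop %reg; cli */
	/* ... walk the page tables ... */
	local_irq_restore(flags);	/* push %reg; popf */

	/* Pattern restored by this fix for get_user_pages_fast(): */
	local_irq_disable();		/* cli */
	/* ... walk the page tables ... */
	local_irq_enable();		/* sti */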

Numbers for pinning a gigabyte of memory, one page at a time, 20 repeats:

  Before:  Average: 14.91 ms, stddev: 0.45 ms
  After:   Average: 10.76 ms, stddev: 0.18 ms
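
The benchmark itself is not included in the patch; a minimal kernel-side
sketch of such a measurement (hypothetical: assumes 'buf' is the start of
an existing 1 GiB user mapping) could look like:

	#include <linux/ktime.h>
	#include <linux/mm.h>
	#include <linux/sizes.h>

	static void time_pin_1g(unsigned long buf)
	{
		unsigned long i, nr = SZ_1G / PAGE_SIZE;
		struct page *page;
		ktime_t t0 = ktime_get();

		for (i = 0; i < nr; i++) {
			/* Pin one page at a time, read-only (write == 0). */
			if (get_user_pages_fast(buf + i * PAGE_SIZE, 1, 0, &page) != 1)
				break;
			put_page(page);	/* drop the reference right away */
		}
		pr_info("pinned %lu pages in %lld ms\n",
			i, ktime_ms_delta(ktime_get(), t0));
	}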

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thorsten Leemhuis <regressions@leemhuis.info>
Cc: linux-mm@kvack.org
Fixes: e585513b76f7 ("x86/mm/gup: Switch GUP to the generic get_user_page_fast() implementation")
Link: http://lkml.kernel.org/r/20170908215603.9189-3-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 5b65c4677a57a1d4414212f9995aa0e46a21ff80)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 5241f4b2c68284612e34910305f3234e4a64701b)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 mm/gup.c | 97 ++++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 58 insertions(+), 39 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index 23f01c40c88f..4a789f1c6a27 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1618,6 +1618,47 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
 	return 1;
 }
 
+static void gup_pgd_range(unsigned long addr, unsigned long end,
+		int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pgd_t *pgdp;
+
+	pgdp = pgd_offset(current->mm, addr);
+	do {
+		pgd_t pgd = READ_ONCE(*pgdp);
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			return;
+		if (unlikely(pgd_huge(pgd))) {
+			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
+					  pages, nr))
+				return;
+		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
+			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
+				 PGDIR_SHIFT, next, write, pages, nr))
+				return;
+		} else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
+			return;
+	} while (pgdp++, addr = next, addr != end);
+}
+
+#ifndef gup_fast_permitted
+/*
+ * Check if it's allowed to use __get_user_pages_fast() for the range, or
+ * we need to fall back to the slow version:
+ */
+bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
+{
+	unsigned long len, end;
+
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+	end = start + len;
+	return end >= start;
+}
+#endif
+
 /*
  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
  * the regular GUP. It will only return non-negative values.
@@ -1625,10 +1666,8 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages)
 {
-	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
-	unsigned long next, flags;
-	pgd_t *pgdp;
+	unsigned long flags;
 	int nr = 0;
 
 	start &= PAGE_MASK;
@@ -1652,45 +1691,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	 * block IPIs that come from THPs splitting.
 	 */
 
-	local_irq_save(flags);
-	pgdp = pgd_offset(mm, addr);
-	do {
-		pgd_t pgd = READ_ONCE(*pgdp);
-
-		next = pgd_addr_end(addr, end);
-		if (pgd_none(pgd))
-			break;
-		if (unlikely(pgd_huge(pgd))) {
-			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
-					  pages, &nr))
-				break;
-		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
-			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
-				PGDIR_SHIFT, next, write, pages, &nr))
-				break;
-		} else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
-			break;
-	} while (pgdp++, addr = next, addr != end);
-	local_irq_restore(flags);
+	if (gup_fast_permitted(start, nr_pages, write)) {
+		local_irq_save(flags);
+		gup_pgd_range(addr, end, write, pages, &nr);
+		local_irq_restore(flags);
+	}
 
 	return nr;
 }
 
-#ifndef gup_fast_permitted
-/*
- * Check if it's allowed to use __get_user_pages_fast() for the range, or
- * we need to fall back to the slow version:
- */
-bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
-{
-	unsigned long len, end;
-
-	len = (unsigned long) nr_pages << PAGE_SHIFT;
-	end = start + len;
-	return end >= start;
-}
-#endif
-
 /*
  * get_user_pages_fast() - pin user pages in memory
  * @start:	starting user address
@@ -1710,12 +1719,22 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages)
 {
+	unsigned long addr, len, end;
 	int nr = 0, ret = 0;
 
 	start &= PAGE_MASK;
+	addr = start;
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+	end = start + len;
+
+	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+					(void __user *)start, len)))
+		return 0;
 
 	if (gup_fast_permitted(start, nr_pages, write)) {
-		nr = __get_user_pages_fast(start, nr_pages, write, pages);
+		local_irq_disable();
+		gup_pgd_range(addr, end, write, pages, &nr);
+		local_irq_enable();
 		ret = nr;
 	}
 