/*
 * Lockless get_user_pages_fast for powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
#undef DEBUG

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

#ifdef __HAVE_ARCH_PTE_SPECIAL

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	result = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		result |= _PAGE_RW;
	mask = result | _PAGE_SPECIAL;

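	/*
	 * _PAGE_SPECIAL is in the mask but not in the expected result,
	 * so any special (non-refcountable) mapping fails the check
	 * below and drops us out to the slow path.
	 */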
	ptep = pte_offset_kernel(&pmd, addr);
	do {
		pte_t pte = *ptep;
		struct page *page;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
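		/*
		 * The speculative reference only pins the page; re-read
		 * the pte to make sure it did not change while we were
		 * taking the reference, and drop the pin if it did.
		 */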
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

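/*
 * On powerpc, huge pages can appear as a huge leaf entry at the
 * PMD/PUD/PGD level (the *_huge() case) or via a hugepage directory
 * (the is_hugepd() case); both are handed off to the hugetlb gup
 * helpers rather than walked as normal page tables.
 */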
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (pmd_huge(pmd)) {
			if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
					 write, pages, nr))
				return 0;
		} else if (is_hugepd(pmdp)) {
			if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
					addr, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (pud_huge(pud)) {
			if (!gup_hugepte((pte_t *)pudp, PUD_SIZE, addr, next,
					 write, pages, nr))
				return 0;
		} else if (is_hugepd(pudp)) {
			if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
					addr, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

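/*
 * A minimal usage sketch (hypothetical caller; "uaddr" and the fixed
 * batch size are assumptions, not part of this file):
 *
 *	struct page *pages[16];
 *	int i, nr;
 *
 *	nr = get_user_pages_fast(uaddr, 16, 1, pages);
 *	if (nr <= 0)
 *		return nr ? nr : -EFAULT;
 *	... access the pinned pages ...
 *	for (i = 0; i < nr; i++)
 *		put_page(pages[i]);
 */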
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		goto slow_irqon;

	pr_devel(" aligned: %lx .. %lx\n", start, end);

	/*
	 * XXX: batch / limit 'nr' to avoid large irq-off latency. This
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch
	 * size will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * they are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables from being freed on powerpc.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_disable();

	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		pr_devel(" %016lx: normal pgd %p\n", addr,
			 (void *)pgd_val(pgd));
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (pgd_huge(pgd)) {
			if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
					 write, pages, &nr))
				goto slow;
		} else if (is_hugepd(pgdp)) {
			if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
					addr, next, write, pages, &nr))
				goto slow;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);

	local_irq_enable();

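	/* The fast path succeeded, so every requested page must be pinned. */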
	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;

slow:
		local_irq_enable();
slow_irqon:
		pr_devel(" slow path ! nr = %d\n", nr);

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/*
		 * Have to be a bit careful with return values: if the
		 * fast path already pinned some pages, report that
		 * partial success even if the slow path failed, so the
		 * caller knows to release those pages.
		 */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}

#endif /* __HAVE_ARCH_PTE_SPECIAL */