/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

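/*
 * Allocate a region/segment (crst) table: ALLOC_ORDER pages, i.e. the
 * 16KB needed for a 2048-entry table on 64-bit (8KB on 31-bit).
 */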
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
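/*
 * crst_table_upgrade - add a higher region-table level to extend the
 * address space of an mm. The new top-level table is allocated without
 * the page table lock; if a racing upgrade got there first, the unused
 * table is freed and the check against 'limit' is repeated.
 */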
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	return 0;
}

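/*
 * crst_table_downgrade - remove region-table levels down to 'limit',
 * freeing each dropped top-level table. The inverse of the upgrade
 * above.
 */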
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

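/*
 * gmap_unlink_segment - remove the rmap entry that connects a gmap
 * segment table entry to a host page table and mark the entry invalid
 * again. Returns 1 if an entry was unlinked (a TLB flush is needed).
 */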
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INVALID)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
	__releases(&gmap->mm->page_table_lock)
	__acquires(&gmap->mm->page_table_lock)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

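/*
 * The gmap uses a full 4-level (region-1) table. A guest address is
 * resolved with the usual s390 table indexing: the topmost 11 bits
 * (addr >> 53) index the region-1 table, the next 11 (>> 42) the
 * region-2 table, then (>> 31) the region-3 table and (>> 20) the
 * segment table, 0x7ff masking each 11-bit index.
 */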
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INVALID;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > TASK_MAX_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
					 _SEGMENT_ENTRY_PROTECT);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

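/*
 * gmap_table_walk - walk the gmap region tables down to the segment
 * table entry for a guest address. Returns -EFAULT wrapped in an
 * ERR_PTR if any table level is still invalid.
 */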
static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
	unsigned long *table;

	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INVALID))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);
	return table;
}

/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

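/*
 * gmap_connect_pgtable - link a gmap segment table entry to the host
 * page table that backs it. Allocates the host pud/pmd/pte levels if
 * needed and records the connection in a gmap_rmap entry so that it
 * can be torn down again later.
 */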
static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
				unsigned long *segment_ptr, struct gmap *gmap)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->gmap = gmap;
	rmap->entry = segment_ptr;
	rmap->vmaddr = address & PMD_MASK;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}

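/*
 * gmap_disconnect_pgtable - sever all gmap connections to a host page
 * table, restoring the invalid/protected segment entries, and flush
 * the TLB if anything was unlinked.
 */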
static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
					     _SEGMENT_ENTRY_PROTECT);
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (address & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_PROTECT))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

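/*
 * gmap_discard - zap the host pages backing the guest address range
 * [from,to). Segments without a connected page table are skipped.
 */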
void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @start: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_fault(start, gmap);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		if (unlikely(!ptep))
			continue;
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= PGSTE_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			start += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		spin_unlock(ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);

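/*
 * A 4K page holds a 256-entry pte table in its lower half. The
 * segment_offset arithmetic below recovers the index of the pte within
 * that table and scales it to the address offset of the page within
 * the 1MB segment (index * PAGE_SIZE).
 */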
/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
{
	unsigned long segment_offset;
	struct gmap_notifier *nb;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	segment_offset = segment_offset * (4096 / sizeof(pte_t));
	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(rmap, &mp->mapper, list) {
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(rmap->gmap,
					  rmap->vmaddr + segment_offset);
	}
	spin_unlock(&gmap_notifier_lock);
}

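/*
 * Page tables with pgstes occupy a full 4K page and keep _mapcount at
 * zero; fragment tables use the low _mapcount bits as an allocation
 * mask and are therefore non-zero while allocated.
 */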
static inline int page_table_with_pgste(struct page *page)
{
	return atomic_read(&page->_mapcount) == 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 0);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
		    PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_HC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(*ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

#else /* CONFIG_PGSTE */

static inline int page_table_with_pgste(struct page *page)
{
	return 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

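/*
 * s390 pte tables are only 2K (256 ptes, plus 256 pgstes in the pgste
 * case), so a 4K page is split into 1K/2K fragments. The low bits of
 * page->_mapcount track which fragments are in use (FRAG_MASK wide),
 * and pages with free fragments sit on mm->context.pgtable_list.
 */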
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

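/*
 * RCU variant of the free path. The fragment bit of the table is
 * shifted into bits 4-7 of the token handed to tlb_remove_table (a
 * full pgste page is tagged with FRAG_MASK instead), which lets
 * __tlb_remove_table tell pending pte fragments apart from crst
 * tables once the grace period has passed.
 */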
static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
		follow_page(vma, addr, FOLL_SPLIT);
}

static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

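/*
 * Walk all page tables of the mm and replace every "short" pte table
 * with a pgste-capable one, copying the existing ptes over. Used by
 * s390_enable_sie below; the old tables are freed via RCU because
 * concurrent walkers may still hold pointers to them.
 */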
static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
				struct mm_struct *mm, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	unsigned long next, *table, *new;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
again:
		if (pmd_none_or_clear_bad(pmd))
			continue;
		table = (unsigned long *) pmd_deref(*pmd);
		page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
		if (page_table_with_pgste(page))
			continue;
		/* Allocate new page table with pgstes */
		new = page_table_alloc_pgste(mm, addr);
		if (!new)
			return -ENOMEM;

		spin_lock(&mm->page_table_lock);
		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
			/* Nuke pmd entry pointing to the "short" page table */
			pmdp_flush_lazy(mm, addr, pmd);
			pmd_clear(pmd);
			/* Copy ptes from old table to new table */
			memcpy(new, table, PAGE_SIZE/2);
			clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
			/* Establish new table */
			pmd_populate(mm, pmd, (pte_t *) new);
			/* Free old table with rcu, there might be a walker! */
			page_table_free_rcu(tlb, table);
			new = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		if (new) {
			page_table_free_pgste(new);
			goto again;
		}
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
				struct mm_struct *mm, pgd_t *pgd,
				unsigned long addr, unsigned long end)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pud++, addr = next, addr != end);

	return addr;
}

static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
					unsigned long addr, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pgd++, addr = next, addr != end);

	return 0;
}

/*
 * Switch on pgstes for the userspace process (needed for KVM).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct mmu_gather tlb;

	/* Do we have pgstes? If yes, we are done. */
	if (mm_has_pgste(tsk->mm))
		return 0;

	down_write(&mm->mmap_sem);
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	/* Reallocate the page tables with pgstes */
	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
	if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
		mm->context.has_pgste = 1;
	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
	up_write(&mm->mmap_sem);
	return mm->context.has_pgste ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}

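/*
 * The THP deposit/withdraw pair stashes the pre-allocated pte table of
 * a huge pmd. The deposited table itself is used as a list_head to
 * chain further deposits on mm->pmd_huge_pte, which is why the first
 * two pte slots are reset to _PAGE_INVALID on withdraw.
 */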
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */