From 47a3e75a6d3db759a5fce3e922f144af0d6f1d38 Mon Sep 17 00:00:00 2001
From: Seth Forshee <seth.forshee@canonical.com>
Date: Wed, 3 May 2017 08:34:56 -0500
Subject: [PATCH 3/4] Revert "mm: introduce page_vma_mapped_walk()"

This reverts commit 6e2a092a48d38cfc0f06bdb523014acbfeba7b2e.

Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
---
 include/linux/rmap.h |  26 -------
 mm/Makefile          |   6 +-
 mm/huge_memory.c     |   9 +--
 mm/page_vma_mapped.c | 188 ---------------------------------------------------
 4 files changed, 5 insertions(+), 224 deletions(-)
 delete mode 100644 mm/page_vma_mapped.c
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b76343610653..15321fb1df6b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
 #include <linux/rwsem.h>
 #include <linux/memcontrol.h>
-#include <linux/highmem.h>
  * The anon_vma heads a list of private "related" vmas, to scan if
@@ -233,31 +232,6 @@ static inline bool page_check_address_transhuge(struct page *page,
-/* Avoid racy checks */
-#define PVMW_SYNC		(1 << 0)
-/* Look for migration entries rather than present PTEs */
-#define PVMW_MIGRATION		(1 << 1)
-struct page_vma_mapped_walk {
-	struct vm_area_struct *vma;
-	unsigned long address;
-static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
-	pte_unmap(pvmw->pte);
-	spin_unlock(pvmw->ptl);
-bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
  * Used by swapoff to help locate where page is expected in vma.
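
(For reference, not part of the revert: callers drove the interface removed
above roughly as sketched below. The pvmw fields, PVMW_SYNC,
page_vma_mapped_walk() and page_vma_mapped_walk_done() are the removed API
itself; the helper and its arguments are hypothetical.)

	/* Hypothetical caller: count mappings of page within one vma. */
	static int count_mappings(struct page *page, struct vm_area_struct *vma,
				  unsigned long address)
	{
		struct page_vma_mapped_walk pvmw = {
			.page = page,
			.vma = vma,
			.address = address,
			.flags = PVMW_SYNC,	/* take ptl before checking */
		};
		int nr = 0;

		/* One true return per mapping; for a PTE-mapped THP the walk
		 * yields every PTE, advancing pvmw.address as it goes. */
		while (page_vma_mapped_walk(&pvmw))
			nr++;

		/* To stop the walk early, a caller would instead call
		 * page_vma_mapped_walk_done(&pvmw) to unlock and unmap. */
		return nr;
	}
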
diff --git a/mm/Makefile b/mm/Makefile
index 474fa3431059..14fa1c83b504 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -23,10 +23,8 @@ KCOV_INSTRUMENT_vmstat.o := n
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
-			   mlock.o mmap.o mprotect.o mremap.o msync.o \
-			   page_vma_mapped.o pagewalk.o pgtable-generic.o \
-			   rmap.o vmalloc.o
+			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
+			   vmalloc.o pagewalk.o pgtable-generic.o
 ifdef CONFIG_CROSS_MEMORY_ATTACH
 mmu-$(CONFIG_MMU)	+= process_vm_access.o
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 87303c6bacf4..e6de801fa477 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1878,12 +1878,9 @@ static void freeze_page(struct page *page)
 static void unfreeze_page(struct page *page)
-	if (PageTransHuge(page)) {
-		remove_migration_ptes(page, page, true);
-		for (i = 0; i < HPAGE_PMD_NR; i++)
-			remove_migration_ptes(page + i, page + i, true);
+	for (i = 0; i < HPAGE_PMD_NR; i++)
+		remove_migration_ptes(page + i, page + i, true);
 static void __split_huge_page_tail(struct page *head, int tail,
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
deleted file mode 100644
index dc1a54826cf2..000000000000
--- a/mm/page_vma_mapped.c
+++ /dev/null
@@ -1,188 +0,0 @@
-#include <linux/mm.h>
-#include <linux/rmap.h>
-#include <linux/hugetlb.h>
-#include <linux/swap.h>
-#include <linux/swapops.h>
-#include "internal.h"
-static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
-	 * Make sure we don't re-load pmd between present and !trans_huge check.
-	 * We need a consistent view.
-	pmde = READ_ONCE(*pvmw->pmd);
-	return pmd_present(pmde) && !pmd_trans_huge(pmde);
-static inline bool not_found(struct page_vma_mapped_walk *pvmw)
-	page_vma_mapped_walk_done(pvmw);
-static bool map_pte(struct page_vma_mapped_walk *pvmw)
-	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
-	if (!(pvmw->flags & PVMW_SYNC)) {
-		if (pvmw->flags & PVMW_MIGRATION) {
-			if (!is_swap_pte(*pvmw->pte))
-			if (!pte_present(*pvmw->pte))
-	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
-	spin_lock(pvmw->ptl);
-static bool check_pte(struct page_vma_mapped_walk *pvmw)
-	if (pvmw->flags & PVMW_MIGRATION) {
-#ifdef CONFIG_MIGRATION
-		if (!is_swap_pte(*pvmw->pte))
-		entry = pte_to_swp_entry(*pvmw->pte);
-		if (!is_migration_entry(entry))
-		if (migration_entry_to_page(entry) - pvmw->page >=
-				hpage_nr_pages(pvmw->page)) {
-		if (migration_entry_to_page(entry) < pvmw->page)
-		if (!pte_present(*pvmw->pte))
-		/* THP can be referenced by any subpage */
-		if (pte_page(*pvmw->pte) - pvmw->page >=
-				hpage_nr_pages(pvmw->page)) {
-		if (pte_page(*pvmw->pte) < pvmw->page)
-/*
- * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
- * @pvmw->address
- * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
- * must be set. pmd, pte and ptl must be NULL.
- *
- * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
- * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
- * adjusted if needed (for PTE-mapped THPs).
- *
- * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
- * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
- * a loop to find all PTEs that map the THP.
- *
- * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
- * regardless of which page table level the page is mapped at. @pvmw->pmd is
- * NULL.
- *
- * Returns false if there are no more page table entries for the page in
- * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
- *
- * If you need to stop the walk before page_vma_mapped_walk() returns false,
- * use page_vma_mapped_walk_done(). It will do the housekeeping.
- */
-bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
-	struct mm_struct *mm = pvmw->vma->vm_mm;
-	struct page *page = pvmw->page;
-	/* The only possible pmd mapping has been handled on last iteration */
-	if (pvmw->pmd && !pvmw->pte)
-		return not_found(pvmw);
-	/* Seeking the next pte entry makes sense only for THP */
-		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
-			return not_found(pvmw);
-	if (unlikely(PageHuge(pvmw->page))) {
-		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address);
-		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
-		spin_lock(pvmw->ptl);
-		if (!check_pte(pvmw))
-			return not_found(pvmw);
-	pgd = pgd_offset(mm, pvmw->address);
-	if (!pgd_present(*pgd))
-	pud = pud_offset(pgd, pvmw->address);
-	if (!pud_present(*pud))
-	pvmw->pmd = pmd_offset(pud, pvmw->address);
-	if (pmd_trans_huge(*pvmw->pmd)) {
-		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
-		if (!pmd_present(*pvmw->pmd))
-			return not_found(pvmw);
-		if (likely(pmd_trans_huge(*pvmw->pmd))) {
-			if (pvmw->flags & PVMW_MIGRATION)
-				return not_found(pvmw);
-			if (pmd_page(*pvmw->pmd) != page)
-				return not_found(pvmw);
-			/* THP pmd was split under us: handle on pte level */
-			spin_unlock(pvmw->ptl);
-		if (!check_pmd(pvmw))
-	if (!map_pte(pvmw))
-		if (check_pte(pvmw))
-			pvmw->address += PAGE_SIZE;
-			if (pvmw->address >=
-					__vma_address(pvmw->page, pvmw->vma) +
-					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
-				return not_found(pvmw);
-			/* Did we cross page table boundary? */
-			if (pvmw->address % PMD_SIZE == 0) {
-				pte_unmap(pvmw->pte);
-				spin_unlock(pvmw->ptl);
-		} while (pte_none(*pvmw->pte));
-			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
-			spin_lock(pvmw->ptl);
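
A detail worth noting when reading the deleted walker above: it never trusts
a lockless page-table test. check_pmd() snapshots the pmd once with
READ_ONCE() so pmd_present() and pmd_trans_huge() are evaluated against the
same value, and the huge-pmd path in page_vma_mapped_walk() re-tests
pmd_trans_huge() after taking pmd_lock(), since a split can race in before
the lock is held. A minimal sketch of that check-then-recheck pattern, with
a hypothetical helper name (not from the patch):

	static bool pmd_mapped_stable(struct mm_struct *mm, pmd_t *pmd)
	{
		pmd_t pmde = READ_ONCE(*pmd);	/* one snapshot for both tests */
		bool huge = false;

		if (pmd_trans_huge(pmde)) {
			spinlock_t *ptl = pmd_lock(mm, pmd);	/* serializes against split */

			/* Re-test under the lock: the pmd may have been split
			 * between the snapshot above and acquiring ptl. */
			huge = pmd_trans_huge(*pmd);
			spin_unlock(ptl);
		}
		return huge;
	}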