#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *, unsigned long private, int **);

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success.
 *
 * Balloon page migration introduces a special case where a 'distinct'
 * return code is used to flag a successful page migration to unmap_and_move().
 * This is necessary because page migration can race against the balloon
 * deflation procedure, and in that case we could introduce a nasty page leak
 * if a successfully migrated balloon page gets released concurrently with
 * migration's unmap_and_move() wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for the balloon
					   * page successful migration case.
					   */
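
/*
 * Illustrative sketch (not part of the original header): a filesystem whose
 * pages carry no private state can honour the convention above either by
 * wiring up the generic helper directly (.migratepage = migrate_page) or via
 * a trivial wrapper that propagates its MIGRATEPAGE_SUCCESS / negative-errno
 * result. The name example_migratepage is hypothetical.
 *
 *	static int example_migratepage(struct address_space *mapping,
 *				       struct page *newpage, struct page *page,
 *				       enum migrate_mode mode)
 *	{
 *		return migrate_page(mapping, newpage, page, mode);
 *	}
 */
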
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};

#ifdef CONFIG_MIGRATION

extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason);
extern int migrate_huge_page(struct page *, new_page_t x,
		unsigned long private, enum migrate_mode mode);
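
/*
 * Illustrative sketch (not part of the original header), assuming the caller
 * already holds a list of isolated pages and a target node @nid: it pairs
 * migrate_pages() with a new_page_t allocator and puts back any pages that
 * could not be migrated. example_new_page, pagelist and nid are hypothetical
 * names introduced only for this sketch.
 *
 *	static struct page *example_new_page(struct page *page,
 *					     unsigned long nid, int **result)
 *	{
 *		return alloc_pages_node((int)nid, GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 *	err = migrate_pages(&pagelist, example_new_page, nid,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */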

extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page);
#else

static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason)
	{ return -ENOSYS; }
static inline int migrate_huge_page(struct page *page, new_page_t x,
		unsigned long private, enum migrate_mode mode)
	{ return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page, int node);
extern bool migrate_ratelimited(int node);
#else
static inline int migrate_misplaced_page(struct page *page, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */
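
/*
 * Illustrative sketch (not part of the original header), assuming a NUMA
 * hinting-fault path that has already resolved the task's preferred node
 * into target_nid (a hypothetical variable): it skips nodes whose inbound
 * migrations are currently rate-limited and treats a nonzero return from
 * migrate_misplaced_page() as an indication that the page was queued for
 * movement towards target_nid.
 *
 *	if (!migrate_ratelimited(target_nid) &&
 *	    migrate_misplaced_page(page, target_nid))
 *		;	/* account the fault on target_nid */
 */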

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */