/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return page for the valid pfn only if the page is online. All pfn
 * walkers which rely on the fully initialized page->flags and others
 * should use this rather than pfn_valid && pfn_to_page
 */
#define pfn_to_online_page(pfn)				   \
({							   \
	struct page *___page = NULL;			   \
	unsigned long ___pfn = pfn;			   \
	unsigned long ___nr = pfn_to_section_nr(___pfn);   \
							   \
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
	    pfn_valid_within(___pfn))			   \
		___page = pfn_to_page(___pfn);		   \
	___page;					   \
})
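
/*
 * Editor's illustration (not part of the original header): a minimal pfn
 * walker using pfn_to_online_page() as the comment above prescribes; the
 * function name is hypothetical.
 */
#if 0
static void example_walk_pfn_range(unsigned long start_pfn,
				   unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_online_page(pfn);

		if (!page)
			continue;	/* offline or invalid pfn */
		/* page->flags is fully initialized from here on */
	}
}
#endif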
/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};
/* Types to control the zone type of onlined and offlined memory */
enum {
	MMOP_OFFLINE = -1,
	MMOP_ONLINE_KEEP,
	MMOP_ONLINE_KERNEL,
	MMOP_ONLINE_MOVABLE,
};
/*
 * Restrictions for the memory hotplug:
 * flags:  MHP_ flags
 * altmap: alternative allocator for memmap array
 */
struct mhp_restrictions {
	unsigned long flags;
	struct vmem_altmap *altmap;
};
/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock() and
 * zone_span_writelock() both held.  This ensures the size of a zone can't
 * be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
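
/*
 * Editor's illustration (hypothetical function, modelled on the zone
 * boundary checks in mm/page_alloc.c): a lockless reader of the zone span.
 * Writers resize under zone_span_writelock(); readers retry if the span
 * changed under them.
 */
#if 0
static bool example_zone_spans_pfn(struct zone *zone, unsigned long pfn)
{
	unsigned long start_pfn, nr_pages;
	unsigned seq;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		nr_pages = zone->spanned_pages;
	} while (zone_span_seqretry(zone, seq));

	return pfn >= start_pfn && pfn < start_pfn + nr_pages;
}
#endif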
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
	unsigned long *valid_start, unsigned long *valid_end);
extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
						unsigned long end_pfn);
typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);
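
/*
 * Editor's illustration (hypothetical driver code): a balloon-style driver
 * can take over page onlining via set_online_page_callback().  The sketch
 * simply reproduces, per base page, what the default behaviour does using
 * the helpers above.
 */
#if 0
static void example_online_page(struct page *page, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++) {
		__online_page_set_limits(page + i);
		__online_page_increment_counters(page + i);
		__online_page_free(page + i);
	}
}

static int example_init(void)
{
	return set_online_page_callback(&example_online_page);
}

static void example_exit(void)
{
	restore_online_page_callback(&example_online_page);
}
#endif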
extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			struct mhp_restrictions *restrictions);
extern u64 max_mem_size;

extern bool memhp_auto_online;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
extern void arch_remove_memory(int nid, u64 start, u64 size,
			       struct vmem_altmap *altmap);
extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
			   unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */
/*
 * Do we want sysfs memblock files created?  This will allow userspace to
 * online and offline memory explicitly.  Lack of this bit means that the
 * caller has to call move_pfn_range_to_zone to finish the initialization.
 */
#define MHP_MEMBLOCK_API		(1<<0)

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_restrictions *restrictions);
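
/*
 * Editor's illustration (hypothetical caller): filling in
 * struct mhp_restrictions before expanding a pfn range.  MHP_MEMBLOCK_API
 * requests the sysfs memblock files described above; a NULL altmap means
 * the memmap is allocated normally.
 */
#if 0
	struct mhp_restrictions restrictions = {
		.flags	= MHP_MEMBLOCK_API,
		.altmap	= NULL,
	};
	int err;

	err = __add_pages(nid, start_pfn, nr_pages, &restrictions);
#endif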
#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct mhp_restrictions *restrictions)
{
	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_restrictions *restrictions);
#endif /* ARCH_HAS_ADD_PAGES */
#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif
#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is only defined for the error path of node_hot_add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate the
 * pgdat.
 * XXX: kmalloc_node() can't be used here, because the pgdat for the new
 *	node is not allocated/initialized yet itself.  To use the new
 *	node's memory for its own pgdat, more consideration is needed.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path in node hot-add.
 * For node hot-remove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}
#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
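
/*
 * Editor's illustration (simplified, hypothetical sketch of the node
 * hot-add flow): allocate the pgdat, publish it, and use
 * arch_free_nodedata() only on the error path, as the comment above notes.
 */
#if 0
	pg_data_t *pgdat = arch_alloc_nodedata(nid);

	if (!pgdat)
		return -ENOMEM;
	arch_refresh_nodedata(nid, pgdat);	/* publish NODE_DATA(nid) */

	if (later_setup_failed) {		/* hypothetical condition */
		arch_free_nodedata(pgdat);
		return -ENOMEM;
	}
#endif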
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);
void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
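
/*
 * Editor's illustration: readers exclude hotplug with the get/put pair,
 * while the hotplug paths themselves serialize with begin/done.
 */
#if 0
	get_online_mems();
	/* ... walk sections/pages; they cannot go offline here ... */
	put_online_mems();

	mem_hotplug_begin();
	/* ... actually online or offline memory ... */
	mem_hotplug_done();
#endif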
extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}
static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}
static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}
static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
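
/*
 * Editor's illustration (hypothetical snippet, in the style of the resize
 * helpers in mm/memory_hotplug.c): growing a node's span under
 * pgdat_resize_lock().
 */
#if 0
	unsigned long flags;

	pgdat_resize_lock(pgdat, &flags);
	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;
	pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
	pgdat_resize_unlock(pgdat, &flags);
#endif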
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
#ifdef CONFIG_MEMORY_HOTREMOVE

extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
#else
static inline bool is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return false;
}

static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline int remove_memory(int nid, u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
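
/*
 * Editor's illustration (hypothetical sequence): tearing memory down is a
 * two-step affair.  offline_pages() isolates and migrates the range;
 * remove_memory() then verifies the blocks are offline and drops the
 * sections.  start/size are in bytes, the pfn range in pages.
 */
#if 0
	int rc;

	rc = offline_pages(PFN_DOWN(start), PFN_DOWN(size));
	if (rc)
		return rc;

	rc = remove_memory(nid, start, size);
#endif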
extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *));
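
/*
 * Editor's illustration (hypothetical callback): walk_memory_range()
 * invokes func once per memory_block in the pfn range; a non-zero return
 * value aborts the walk.
 */
#if 0
static int example_count_blocks(struct memory_block *mem, void *arg)
{
	(*(unsigned int *)arg)++;
	return 0;
}

	/* caller: */
	unsigned int nr_blocks = 0;

	walk_memory_range(start_pfn, end_pfn, &nr_blocks,
			  example_count_blocks);
#endif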
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_one_section(int nid, unsigned long start_pfn,
				  struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
		int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages);

#endif /* __LINUX_MEMORY_HOTPLUG_H */