/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types to control the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want memmap (struct page array) to be self contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY	((__force mhp_t)BIT(1))
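
/*
 * Example (illustrative sketch, not part of this header): a hotplug
 * driver combining the flags above, falling back to a plain hot-add
 * when a self-hosted memmap is not supported. nid/start/size are
 * placeholders.
 *
 *	mhp_t mhp_flags = MHP_MERGE_RESOURCE;
 *
 *	if (mhp_supports_memmap_on_memory(size))
 *		mhp_flags |= MHP_MEMMAP_ON_MEMORY;
 *	rc = add_memory(nid, start, size, mhp_flags);
 */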

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
};
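
/*
 * Example (illustrative sketch): filling in the parameters the way a
 * caller of arch_add_memory() typically would. PAGE_KERNEL is merely
 * the common default protection, not a requirement of this API.
 *
 *	struct mhp_params params = {
 *		.pgprot = PAGE_KERNEL,
 *	};
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 */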

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);
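
/*
 * Example (illustrative sketch): rejecting a candidate range before a
 * hot-add that needs a linear mapping, mirroring the check done in
 * add_memory_resource().
 *
 *	if (!mhp_range_allowed(start, size, true))
 *		return -E2BIG;
 */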

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have both pgdat_resize_lock()
 * and zone_span_writelock() held. This ensures the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
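
/*
 * Example (illustrative sketch): a lockless reader sampling the zone
 * span, retrying if a concurrent resize raced with the read.
 *
 *	unsigned seq;
 *	unsigned long start_pfn, nr_pages;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */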

extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
extern void adjust_present_page_count(struct zone *zone, long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn,
					unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
					 unsigned long end_pfn);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
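
/*
 * Example (illustrative sketch): a ballooning driver intercepting
 * freshly onlined pages; my_online_page() is a hypothetical callback
 * that here simply hands each page to the default handler.
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		generic_online_page(page, order);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */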

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(int nid, u64 start, u64 size,
			       struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well. But it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * Now, arch_free_nodedata() is just defined for the error path of
 * node_hot_add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
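
/*
 * Example (illustrative sketch): the node hot-add error path these
 * hooks exist for; init_fails is a hypothetical condition.
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return -ENOMEM;
 *	arch_refresh_nodedata(nid, pgdat);
 *	if (init_fails)
 *		arch_free_nodedata(pgdat);
 */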

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
 * XXX: kmalloc_node() can't work well to get new node's memory at this time.
 *	Because pgdat for the new node is not allocated/initialized yet itself.
 *	To use new node's memory, more consideration will be necessary.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for error path in node hotadd.
 * For node hotremove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal non memory hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
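
/*
 * Example (illustrative sketch): a writer resizing a zone under both
 * locks, per the locking note near the zone span helpers above.
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	zone_span_writelock(zone);
 *	// ...update zone->zone_start_pfn / zone->spanned_pages...
 *	zone_span_writeunlock(zone);
 *	pgdat_resize_unlock(pgdat, &flags);
 */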

#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
extern int offline_and_remove_memory(int nid, u64 start, u64 size);
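
/*
 * Example (illustrative sketch): a driver unplugging a range it added
 * earlier. offline_and_remove_memory() combines both steps and fails
 * if the range cannot be offlined.
 *
 *	rc = offline_and_remove_memory(nid, start, size);
 *	if (rc)
 *		return rc;	// still in use, try again later
 */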

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline int remove_memory(int nid, u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
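
/*
 * Example (illustrative sketch): hot-adding driver-managed memory. The
 * resource name is conventionally of the form "System RAM ($DRIVER)";
 * "virtio_mem" is just a sample driver name.
 *
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (virtio_mem)", MHP_NONE);
 */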

extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
			      unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
				  unsigned long pfn, unsigned long nr_pages,
				  unsigned long map_offset,
				  struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */