/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers which rely on fully initialized page->flags (and others)
 * should use this rather than pfn_valid && pfn_to_page.
 */
#define pfn_to_online_page(pfn)				\
({							\
	struct page *___page = NULL;			\
	unsigned long ___nr = pfn_to_section_nr(pfn);	\
							\
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
		___page = pfn_to_page(pfn);		\
	___page;					\
})
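
/*
 * Example (illustrative sketch, not part of this header): a pfn walker
 * that only touches online, fully initialized pages instead of relying
 * on pfn_valid() alone.
 *
 *	unsigned long pfn;
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		...
 *	}
 */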

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};
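
/*
 * Example (sketch of how mm/memory_hotplug.c tags memmap pages; details
 * may differ by version): pages backing a section's memmap are marked
 * with the section number and SECTION_INFO so they can later be
 * released again via put_page_bootmem().
 *
 *	get_page_bootmem(section_nr, page, SECTION_INFO);
 *	...
 *	put_page_bootmem(page);
 */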

/* Types to control the zone type of onlined and offlined memory */
enum {
	MMOP_OFFLINE = -1,
	MMOP_ONLINE_KEEP,
	MMOP_ONLINE_KERNEL,
	MMOP_ONLINE_MOVABLE,
};
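
/*
 * Example (illustrative sketch of how the memory sysfs "online" handling
 * uses these values; the exact call chain may differ by version):
 *
 *	zone = zone_for_pfn_range(MMOP_ONLINE_MOVABLE, nid, start_pfn, nr_pages);
 *	...
 *	ret = online_pages(start_pfn, nr_pages, MMOP_ONLINE_MOVABLE);
 */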

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock()
 * and zone_span_writelock() both held. This ensures the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
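
/*
 * Example (illustrative sketch): reading the zone span consistently
 * against a concurrent resize, and resizing it with both locks held.
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 *
 *	pgdat_resize_lock(zone->zone_pgdat, &flags);
 *	zone_span_writelock(zone);
 *	... update zone->zone_start_pfn / zone->spanned_pages ...
 *	zone_span_writeunlock(zone);
 *	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 */
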
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
	unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long, unsigned long);

typedef void (*online_page_callback_t)(struct page *page);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);

extern int try_online_node(int nid);

extern bool memhp_auto_online;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
extern int arch_remove_memory(u64 start, u64 size,
		struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		bool want_memblock)
{
	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);
#endif /* ARCH_HAS_ADD_PAGES */
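
/*
 * Example (illustrative sketch of an arch_add_memory() implementation on
 * an architecture without ARCH_HAS_ADD_PAGES; real arch code typically
 * also updates the direct mapping first):
 *
 *	unsigned long start_pfn = start >> PAGE_SHIFT;
 *	unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *	return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 */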

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * For now, arch_free_nodedata() is only defined for the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * With ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate the pgdat.
 * XXX: kmalloc_node() cannot be used to get the new node's memory at this point,
 *	because the pgdat for the new node is not allocated/initialized yet.
 *	Using the new node's own memory will require more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path of node hot-add.
 * For node hot-remove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
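
/*
 * Example (simplified sketch of the node hot-add pattern in
 * mm/memory_hotplug.c):
 *
 *	pgdat = arch_alloc_nodedata(nid);
 *	if (!pgdat)
 *		return NULL;
 *	arch_refresh_nodedata(nid, pgdat);
 *	...
 *	On failure the allocation is rolled back:
 *
 *	arch_refresh_nodedata(nid, NULL);
 *	arch_free_nodedata(pgdat);
 */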

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
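
/*
 * Example (illustrative sketch, simplified from how hotplug grows a node
 * span): the span update is done with the resize lock held; the irqsave
 * variant also disables interrupts.
 *
 *	unsigned long flags;
 *	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
 *		pgdat->node_start_pfn = start_pfn;
 *	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) -
 *				    pgdat->node_start_pfn;
 *	pgdat_resize_unlock(pgdat, &flags);
 */
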
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);

#else
static inline bool is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return false;
}

static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline void remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int arch_add_memory(int nid, u64 start, u64 size,
		struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern void remove_memory(int nid, u64 start, u64 size);
extern int sparse_add_one_section(struct pglist_data *pgdat,
		unsigned long start_pfn, struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
		int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */