/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct memory_group;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/* Types for controlling the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};
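
/*
 * Sketch of how these map to user space, for a hypothetical memory block X:
 * writing "offline", "online", "online_kernel" or "online_movable" to
 * /sys/devices/system/memory/memoryX/state requests MMOP_OFFLINE,
 * MMOP_ONLINE, MMOP_ONLINE_KERNEL or MMOP_ONLINE_MOVABLE respectively;
 * see mhp_online_type_from_str().
 */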

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY	((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))
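
/*
 * A minimal sketch of flag usage by a hot-add caller (nid, start and
 * size are hypothetical values):
 *
 *	mhp_t mhp_flags = MHP_MERGE_RESOURCE;
 *
 *	if (mhp_supports_memmap_on_memory(size))
 *		mhp_flags |= MHP_MEMMAP_ON_MEMORY;
 *	rc = add_memory(nid, start, size, mhp_flags);
 */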

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	   (required)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
};
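
/*
 * A minimal sketch of how a caller fills this in; add_memory_resource(),
 * for instance, uses the default kernel page protection (nid, start and
 * size are hypothetical values):
 *
 *	struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 */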

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

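/*
 * Sketch of a pre-check against the pluggable range, roughly what
 * mhp_range_allowed() does internally (start and size are hypothetical):
 *
 *	struct range mhp_range = mhp_get_pluggable_range(true);
 *
 *	if (start < mhp_range.start || start + size - 1 > mhp_range.end)
 *		return -ERANGE;
 */
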
/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have both pgdat_resize_lock()
 * and zone_span_writelock() held. This ensures the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
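
/*
 * Sketch of the matching read side (cf. page_outside_zone_boundaries()):
 * a reader retries until it observes a stable zone span:
 *
 *	unsigned seq;
 *	bool spans;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		spans = pfn >= zone->zone_start_pfn &&
 *			pfn < zone_end_pfn(zone);
 *	} while (zone_span_seqretry(zone, seq));
 */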
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
					 unsigned long end_pfn);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
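
/*
 * Sketch of a driver (e.g. a memory ballooning driver) intercepting
 * newly onlined pages; my_online_page is a hypothetical callback:
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		...decide to keep the page or expose it to the buddy...
 *		generic_online_page(page, order);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */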

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* True if the movable_node boot option was specified. */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
			    unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * Currently, arch_free_nodedata() is only defined for the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate the
 * pgdat.
 * XXX: kmalloc_node() can't be used to allocate from the new node's memory
 * at this time, because the pgdat for the new node has not been
 * allocated/initialized yet. Using the new node's memory will require more
 * consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path in node hot-add.
 * For node hot-remove, we would have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
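
/*
 * Sketch of typical use while resizing a node (the lock protects
 * node_start_pfn and node_spanned_pages; pgdat, start_pfn and end_pfn
 * are hypothetical values):
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	pgdat->node_start_pfn = start_pfn;
 *	pgdat->node_spanned_pages = end_pfn - start_pfn;
 *	pgdat_resize_unlock(pgdat, &flags);
 */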
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);
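
/*
 * Sketch of a driver tearing down memory it previously hot-added (start
 * and size are hypothetical; the range must span complete memory blocks):
 *
 *	rc = offline_and_remove_memory(start, size);
 *	if (rc)
 *		...range is still (partially) online; try again later...
 */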

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
			      unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
				  unsigned long pfn, unsigned long nr_pages,
				  unsigned long map_offset,
				  struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
				       struct memory_group *group,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */