// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

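/*
 * Mark the pageblock containing @page MIGRATE_ISOLATE and move its free
 * pages to the isolate freelist, so nothing in the block can be
 * allocated. Returns 0 on success, or -EBUSY if the pageblock is
 * already isolated or holds pages that cannot be isolated.
 */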
static int set_migratetype_isolate(struct page *page, int migratetype,
				bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.  Return -EBUSY
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if its
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to report the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or are on the LRU, isolation can
	 * continue. Later, for example, when the memory hotplug
	 * notifier runs, the pages reported as "can be isolated" are
	 * expected to be isolated (freed) by the balloon driver via
	 * the memory notifier chain. (See the illustrative notifier
	 * sketch after this function.)
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: memory hotplug does not yet call shrink_slab() by
	 * itself, so we only check movable pages here.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * Here, "immobile" means pages that are not on the LRU. If the
	 * number of immobile pages exceeds the number of
	 * removable-by-driver pages reported by the notifier, we fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
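
/*
 * Illustrative sketch (not part of this file) of how a balloon driver
 * might hook the memory isolate notifier chain consumed above. Names
 * are hypothetical; the shape follows existing users such as the
 * pseries CMM driver:
 *
 *	static int balloon_isolate_cb(struct notifier_block *self,
 *				      unsigned long action, void *arg)
 *	{
 *		struct memory_isolate_notify *marg = arg;
 *
 *		if (action == MEM_ISOLATE_COUNT)
 *			marg->pages_found +=
 *				balloon_pages_in_range(marg->start_pfn,
 *						       marg->nr_pages);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block balloon_isolate_nb = {
 *		.notifier_call = balloon_isolate_cb,
 *	};
 *
 *	register_memory_isolate_notifier(&balloon_isolate_nb);
 */

/*
 * Undo set_migratetype_isolate(): restore the pageblock's migratetype
 * and move its free pages back to the matching freelist. A free page of
 * order >= pageblock_order is isolated and re-freed so it can merge
 * with its buddy (see the comments in the body below).
 */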
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with order >= pageblock_order on an
	 * isolated pageblock is restricted from merging due to the
	 * freepage counting problem, an unmerged free buddy page may
	 * exist here. move_freepages_block() does not handle merging,
	 * so we need another approach: isolating the page and then
	 * freeing it will cause it to be merged.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page of order >= pageblock_order, there
	 * should be no other free pages in the range, so we can avoid
	 * the costly pageblock scan for freepage moving.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}
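
/*
 * Return the first valid, online page in [pfn, pfn + nr_pages), or NULL
 * if the range contains no such page.
 */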
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		if (!pfn_valid_within(pfn + i))
			continue;
		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/*
 * start_isolate_page_range() -- make the page-allocation type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making the page-allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated: neither the pages that are free
 * now, nor any pages freed in the future.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 *
 * There is no high-level synchronization mechanism that prevents two
 * threads from trying to isolate overlapping ranges. If this happens,
 * one thread will notice pageblocks in the overlapping range already
 * set to isolate. This is detected in set_migratetype_isolate(), which
 * then returns an error. We clean up by restoring the migration type on
 * any pageblocks we may have modified and return -EBUSY to the caller.
 * This prevents two threads from simultaneously working on overlapping
 * ranges. (A sketch of the typical caller sequence follows this
 * function.)
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
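
/*
 * A minimal sketch of the typical caller sequence, modelled on
 * alloc_contig_range() in mm/page_alloc.c (error handling and alignment
 * trimmed):
 *
 *	if (start_isolate_page_range(start, end, MIGRATE_MOVABLE, false))
 *		return -EBUSY;
 *	... migrate in-use pages out of [start, end) ...
 *	if (test_pages_isolated(start, end, false))
 *		... range is still busy, bail out ...
 *	... claim the now-free pages in the range ...
 *	undo_isolate_page_range(start, end, MIGRATE_MOVABLE);
 */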

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/*
 * Test whether all pages in [start_pfn, end_pfn) are isolated. The
 * caller should ensure that the requested range is in a single zone.
 * Returns 0 if so, -EBUSY otherwise.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages. Hence we
	 * check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
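
/*
 * Allocation callback for migrate_pages(), used by callers such as
 * alloc_contig_range() when draining in-use pages out of an isolated
 * range: allocate a replacement page near the current node, allowing
 * any node that has memory.
 */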
struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}