// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#define pr_fmt(fmt) "PM: hibernation: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */


/*
 * The calls to set_direct_map_*() should not fail because remapping a page
 * here means that we only update protection bits in an existing PTE.
 * It is still worth having a warning here in case something changes and this
 * is no longer the case.
 */
static inline void hibernate_map_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
		int ret = set_direct_map_default_noflush(page);

		if (ret)
			pr_warn_once("Failed to remap page\n");
	} else {
		debug_pagealloc_map_pages(page, 1);
	}
}

static inline void hibernate_unmap_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
		unsigned long addr = (unsigned long)page_address(page);
		int ret = set_direct_map_invalid_noflush(page);

		if (ret)
			pr_warn_once("Failed to remap page\n");

		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	} else {
		debug_pagealloc_unmap_pages(page, 1);
	}
}

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
}

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

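/*
 * __get_safe_page - Grab a "safe" page, reusing safe_pages_list if possible.
 *
 * Pages on safe_pages_list have already been verified not to collide with
 * the image kernel's memory, so reuse them (zeroed) before falling back to
 * get_image_page(..., PG_SAFE).
 */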
static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

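/*
 * alloc_image_page - Allocate a page and mark it as an image page, so that
 * swsusp_free() can find and release it after resume.
 */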
static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

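/* Push a no-longer-needed safe page back onto safe_pages_list for reuse. */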
static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

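/*
 * chain_alloc - Carve @size bytes out of the current page of the chain.
 *
 * Note that chain_init() sets used_space to LINKED_PAGE_DATA_SIZE, so the
 * first call here always allocates a fresh page.  Objects never span page
 * boundaries.
 */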
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

/*
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes.  The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	unsigned long cur_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages not used before hibernation (restore only)
 * @ca: Pointer to a linked list of pages ("a chain") to allocate from
 * @list: Radix Tree node to add.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree.  It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order.  This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree.  The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

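/* Rewind the bitmap cursor to the first leaf node of the first zone. */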
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.cur_pfn = BM_END_OF_MAP;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone.  Now walk the radix tree to find the leaf node
	 * for our PFN.
	 */

	/*
	 * If the zone we wish to scan is the current zone and the
	 * pfn falls into the current node then we do not need to walk
	 * the tree.
	 */
	node = bm->cur.node;
	if (zone == bm->cur.zone &&
	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node      = zone->rtree;
	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
	bm->cur.cur_pfn = pfn;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

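/*
 * The set/clear/test helpers below BUG() if @pfn lies outside all zones of
 * the bitmap; use mem_bm_set_bit_check() where that is a legitimate
 * possibility (e.g. nosave regions covering holes in the memory map).
 */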
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

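/*
 * Clear the bit last returned by memory_bm_next_pfn(); node_bit already
 * points one past it, hence the "- 1".
 */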
static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

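/* Return the PFN corresponding to the bitmap's current bit position. */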
static unsigned long memory_bm_get_current(struct memory_bitmap *bm)
{
	return bm->cur.cur_pfn;
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap.  This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit  = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
				  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit	  = find_next_bit(bm->cur.node->data, bits,
					  bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			bm->cur.cur_pfn = pfn;
			return pfn;
		}
	} while (rtree_next_node(bm));

	bm->cur.cur_pfn = BM_END_OF_MAP;
	return BM_END_OF_MAP;
}

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

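/*
 * Return all pages backing @bm (rtree node pages as well as the chain pages
 * holding the metadata) to safe_pages_list instead of freeing them.
 */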
static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
					struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	/* This allocation cannot fail */
	region = memblock_alloc(sizeof(struct nosave_region),
				SMP_CACHE_BYTES);
	if (!region)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(struct nosave_region));
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

static void clear_or_poison_free_page(struct page *page)
{
	if (page_poisoning_enabled_static())
		__kernel_poison_pages(page, 1);
	else if (want_init_on_free())
		clear_highpage(page);
}

void clear_or_poison_free_pages(void)
{
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	if (page_poisoning_enabled() || want_init_on_free()) {
		memory_bm_position_reset(bm);
		pfn = memory_bm_next_pfn(bm);
		while (pfn != BM_END_OF_MAP) {
			if (pfn_valid(pfn))
				clear_or_poison_free_page(pfn_to_page(pfn));

			pfn = memory_bm_next_pfn(bm);
		}
		memory_bm_position_reset(bm);
		pr_info("free pages cleared after restore\n");
	}
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the hibernation
 * image data structures for @zone (usually, the returned value is greater than
 * the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}

/*
 * Touch the watchdog for every WD_PAGE_COUNT pages.
 */
#define WD_PAGE_COUNT	(128*1024)

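/*
 * mark_free_pages - Refresh free_pages_map for @zone: clear the stale bits
 * of non-forbidden pages, then set the bit for every page that currently
 * sits on the zone's buddy free lists.
 */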
static void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
	unsigned long flags;
	unsigned int order, t;
	struct page *page;

	if (zone_is_empty(zone))
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);

			if (!--page_count) {
				touch_nmi_watchdog();
				page_count = WD_PAGE_COUNT;
			}

			if (page_zone(page) != zone)
				continue;

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each_entry(page,
				&zone->free_area[order].free_list[t], buddy_list) {
			unsigned long i;

			pfn = page_to_pfn(page);
			for (i = 0; i < (1UL << order); i++) {
				if (!--page_count) {
					touch_nmi_watchdog();
					page_count = WD_PAGE_COUNT;
				}
				swsusp_set_page_free(pfn_to_page(pfn + i));
			}
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) || PageOffline(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageOffline(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.  Returns true if the page was filled with only zeros,
 * otherwise false.
 */
static inline bool do_copy_page(long *dst, long *src)
{
	long z = 0;
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--) {
		z |= *src;
		*dst++ = *src++;
	}
	return !z;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables.  This is always the case if neither CONFIG_DEBUG_PAGEALLOC nor
 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is set, in which case kernel_page_present()
 * always returns 'true'.  Returns true if the page was entirely composed of
 * zeros, otherwise false.
 */
static bool safe_copy_page(void *dst, struct page *s_page)
{
	bool zeros_only;

	if (kernel_page_present(s_page)) {
		zeros_only = do_copy_page(dst, page_address(s_page));
	} else {
		hibernate_map_page(s_page);
		zeros_only = do_copy_page(dst, page_address(s_page));
		hibernate_unmap_page(s_page);
	}
	return zeros_only;
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;
	bool zeros_only;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		zeros_only = do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			zeros_only = safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			zeros_only = safe_copy_page(page_address(d_page), s_page);
		}
	}
	return zeros_only;
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline int copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	return safe_copy_page(page_address(pfn_to_page(dst_pfn)),
				pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

/*
 * copy_data_pages - Copy every saveable page into a page frame pulled from
 * @copy_bm.  A page that turns out to be entirely filled with zeros is only
 * marked in @zero_bm; no copy frame is consumed for it.
 *
 * Returns the number of pages actually copied.
 */
static unsigned long copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm,
			    struct memory_bitmap *zero_bm)
{
	unsigned long copied_pages = 0;
	struct zone *zone;
	unsigned long pfn, copy_pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	copy_pfn = memory_bm_next_pfn(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		if (copy_data_page(copy_pfn, pfn)) {
			memory_bm_set_bit(zero_bm, pfn);
			/* Use this copy_pfn for a page that is not full of zeros */
			continue;
		}
		copied_pages++;
		copy_pfn = memory_bm_next_pfn(copy_bm);
	}
	return copied_pages;
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/* Number of zero pages */
static unsigned int nr_zero_pages;

/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/* Memory bitmap which tracks which saveable pages were zero filled. */
static struct memory_bitmap zero_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps.  This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	nr_zero_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

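/*
 * preallocate_image_memory - Allocate up to @nr_pages lowmem image pages,
 * but never more than @avail_normal minus what has already been allocated
 * from lowmem (alloc_normal).
 */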
preallocate_image_memory(unsigned long nr_pages,unsigned long avail_normal)1677  static unsigned long preallocate_image_memory(unsigned long nr_pages,
1678  					      unsigned long avail_normal)
1679  {
1680  	unsigned long alloc;
1681  
1682  	if (avail_normal <= alloc_normal)
1683  		return 0;
1684  
1685  	alloc = avail_normal - alloc_normal;
1686  	if (nr_pages < alloc)
1687  		alloc = nr_pages;
1688  
1689  	return preallocate_image_pages(alloc, GFP_IMAGE);
1690  }
1691  
1692  #ifdef CONFIG_HIGHMEM
preallocate_image_highmem(unsigned long nr_pages)1693  static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1694  {
1695  	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1696  }
1697  
1698  /**
1699   *  __fraction - Compute (an approximation of) x * (multiplier / base).
1700   */
__fraction(u64 x,u64 multiplier,u64 base)1701  static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1702  {
1703  	return div64_u64(x * multiplier, base);
1704  }
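
/*
 * For instance (illustrative numbers only), __fraction(1000, 300, 1200)
 * computes 1000 * 300 / 1200 = 250, i.e. a quarter of x, with the
 * multiplication done in 64 bits before the division so precision is not
 * lost on 32-bit systems.
 */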

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}
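
/*
 * A worked example of the accounting above (illustrative numbers only):
 * with 100 saveable normal pages, 20 saveable highmem pages,
 * alloc_normal == 130 and alloc_highmem == 10, the first branch yields
 * to_free_normal = 30.  The highmem deficit of 10 pages then has to be
 * covered by normal pages, so to_free_normal drops to 20 and the loop
 * releases 20 normal pages from copy_bm.
 */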

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too aggressively, so
 * estimate the minimum acceptable size of a hibernation image to use as the
 * lower limit for preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}
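
/*
 * For example (illustrative numbers only): with 200000 saveable pages and
 * 150000 pages that are reclaimable in theory (slab + anon + file LRU),
 * the estimated minimum image size is 200000 - 150000 = 50000 pages, and
 * preallocation will not try to shrink the image below that.
 */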

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and by reserved_size divided by PAGE_SIZE, which is tunable
 * through /sys/power/reserved_size, respectively).  To make this happen, we
 * compute the total number of available page frames and allocate at least
 *
 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
 *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory\n");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate original bitmap\n");
		goto err_out;
	}

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate copy bitmap\n");
		goto err_out;
	}

	error = memory_bm_create(&zero_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate zero bitmap\n");
		goto err_out;
	}

	alloc_normal = 0;
	alloc_highmem = 0;
	nr_zero_pages = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc) {
			pr_err("Image allocation is %lu pages short\n",
				alloc - pages_highmem);
			goto err_out;
		}
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_info("Allocated %lu pages for snapshot\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
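
/*
 * Worked example for the max_size formula above (illustrative numbers
 * only): with count = 1000000 usable page frames, size = 2000 metadata
 * pages, PAGES_FOR_IO = 1024 and a 2 MB reserved_size (512 pages),
 * max_size = (1000000 - 3024) / 2 - 2 * 512 = 497464 pages, i.e. just
 * under half of the usable memory may remain in use as saveable pages.
 */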

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}
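
/*
 * For example (illustrative numbers only): with nr_pages = 1000 data
 * pages, nr_highmem = 300 and only 100 free highmem pages, 200 extra
 * normal pages are needed for the highmem copies, so the check above
 * becomes free > 1200 + PAGES_FOR_IO.
 */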

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * While allocating the suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	nr_copy_pages = copy_data_pages(&copy_bm, &orig_bm, &zero_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This especially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */
	nr_pages += nr_highmem;
	/* We don't actually copy the zero pages */
	nr_zero_pages = nr_pages - nr_copy_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);

	return 0;
}
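
/*
 * The nr_meta_pages computation above stores one unsigned long per page
 * frame, so (illustrative numbers, assuming 4 KB pages and 8-byte longs)
 * each metadata page holds 512 PFNs and an image of 100000 page frames
 * needs DIV_ROUND_UP(100000 * 8, 4096) = 196 metadata pages.
 */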

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static const char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

#define ENCODED_PFN_ZERO_FLAG ((unsigned long)1 << (BITS_PER_LONG - 1))
#define ENCODED_PFN_MASK (~ENCODED_PFN_ZERO_FLAG)
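
/*
 * Illustrative sketch only (these helpers are not used below; pack_pfns()
 * and unpack_orig_pfns() open-code the same bit operations): how a PFN is
 * encoded for a zero page and decoded again.  On a 64-bit kernel, a zero
 * page with PFN 0x1234 is stored as 0x8000000000001234.
 */
static inline unsigned long __maybe_unused
encode_pfn_example(unsigned long pfn, bool zero)
{
	return zero ? pfn | ENCODED_PFN_ZERO_FLAG : pfn;
}

static inline unsigned long __maybe_unused
decode_pfn_example(unsigned long encoded, bool *zero)
{
	*zero = !!(encoded & ENCODED_PFN_ZERO_FLAG);
	return encoded & ENCODED_PFN_MASK;
}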

/**
 * pack_pfns - Prepare PFNs for saving.
 * @bm: Memory bitmap.
 * @buf: Memory buffer to store the PFNs in.
 * @zero_bm: Memory bitmap containing PFNs of zero pages.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time). Pages which were filled with only
 * zeros will have the highest bit set in the packed format to distinguish
 * them from PFNs which will be contained in the image file.
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm,
		struct memory_bitmap *zero_bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		if (memory_bm_test_bit(zero_bm, buf[j]))
			buf[j] |= ENCODED_PFN_ZERO_FLAG;
	}
}

/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function on each subsequent call.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors.  If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm, &zero_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}
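
/*
 * A minimal sketch (not part of this file's flow) of how a consumer such
 * as the snapshot I/O code is expected to drive snapshot_read_next().
 * The write_page callback is a hypothetical stand-in for whatever sinks
 * the image data.
 */
static int __maybe_unused snapshot_read_all(struct snapshot_handle *handle,
					    int (*write_page)(void *buf))
{
	int ret;

	memset(handle, 0, sizeof(*handle));
	for (;;) {
		ret = snapshot_read_next(handle);
		if (ret <= 0)
			return ret;	/* 0: end of image, < 0: error */
		ret = write_page(data_of(*handle));
		if (ret)
			return ret;
	}
}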

static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}

static int check_header(struct swsusp_info *info)
{
	const char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @bm: Memory bitmap.
 * @buf: Area of memory containing the PFNs.
 * @zero_bm: Memory bitmap with the zero PFNs marked.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm. If the page was originally populated with only
 * zeros then a corresponding bit will also be set in @zero_bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm,
		struct memory_bitmap *zero_bm)
{
	unsigned long decoded_pfn;
	bool zero;
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		zero = !!(buf[j] & ENCODED_PFN_ZERO_FLAG);
		decoded_pfn = buf[j] & ENCODED_PFN_MASK;
		if (pfn_valid(decoded_pfn) && memory_bm_pfn_present(bm, decoded_pfn)) {
			memory_bm_set_bit(bm, decoded_pfn);
			if (zero) {
				memory_bm_set_bit(zero_bm, decoded_pfn);
				nr_zero_pages++;
			}
		} else {
			if (!pfn_valid(decoded_pfn))
				pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n",
				       (unsigned long long)PFN_PHYS(decoded_pfn));
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages).  The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page get a buffer that suspend_write_next() should
 * return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
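
/*
 * For example (illustrative numbers, assuming 4 KB pages, 8-byte pointers
 * and a three-pointer struct pbe): LINKED_PAGE_DATA_SIZE is one page
 * minus the next pointer, so PBES_PER_LINKED_PAGE = (4096 - 8) / 24 =
 * 170 page backup entries per linked page.
 */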

/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 * @zero_bm: Memory bitmap containing the zero pages.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet.  Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later.  On systems with high
 * memory a list of "safe" highmem pages is created too.
 *
 * Because it was not known which pages were unsafe when @zero_bm was created,
 * make a copy of it and recreate it within safe pages.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm,
		struct memory_bitmap *zero_bm)
{
	unsigned int nr_pages, nr_highmem;
	struct memory_bitmap tmp;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);

	/* Make a copy of zero_bm so it can be created in safe pages */
	error = memory_bm_create(&tmp, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(&tmp, zero_bm);
	memory_bm_free(zero_bm, PG_UNSAFE_KEEP);

	/* Recreate zero_bm in safe pages */
	error = memory_bm_create(zero_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(zero_bm, &tmp);
	memory_bm_free(&tmp, PG_UNSAFE_CLEAR);
	/* At this point zero_bm is in safe pages and it can be used for restoring. */

	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * nr_copy_pages cannot be less than allocated_unsafe_pages, either.
	 */
	nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}

/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the writing.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function on each subsequent call.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition.  Negative
 * numbers are returned on errors, in which case the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

next:
	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		error = memory_bm_create(&zero_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		nr_zero_pages = 0;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm, &zero_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm, &zero_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			memory_bm_position_reset(&zero_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;

	/* Zero pages were not included in the image, memset it and move on. */
	if (handle->cur > nr_meta_pages + 1 &&
	    memory_bm_test_bit(&zero_bm, memory_bm_get_current(&orig_bm))) {
		memset(handle->buffer, 0, PAGE_SIZE);
		goto next;
	}

	return PAGE_SIZE;
}

/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem.  Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages);
}
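
/*
 * A minimal sketch (not part of this file's flow) of how the restore side
 * is expected to drive the functions above.  The read_page callback is a
 * hypothetical stand-in for whatever sources the image data.
 */
static int __maybe_unused snapshot_write_all(struct snapshot_handle *handle,
					     int (*read_page)(void *buf))
{
	int ret;

	memset(handle, 0, sizeof(*handle));
	for (;;) {
		ret = snapshot_write_next(handle);
		if (ret <= 0)
			break;
		ret = read_page(data_of(*handle));
		if (ret)
			return ret;
	}
	if (!ret) {
		snapshot_write_finalize(handle);
		if (!snapshot_image_loaded(handle))
			ret = -ENODATA;
	}
	return ret;
}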

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (ie. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */