/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/frame.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>
#include "kexec_internal.h"

DEFINE_MUTEX(kexec_mutex);

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in the do_exit() path, each of which
	 * corresponds to one of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses. On processors
 * where you can disable the MMU this is trivial, and easy. For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place. This means I can only support memory whose
 * physical address can fit in an unsigned long. In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages. As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it). The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
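
/*
 * Rough sketch of the entry list built below (kimage_add_entry() and
 * friends), using the IND_* flags from <linux/kexec.h>:
 *
 *	destination | IND_DESTINATION	set the current copy target
 *	source page | IND_SOURCE	copy here; target += PAGE_SIZE
 *	...more IND_SOURCE entries...
 *	next table  | IND_INDIRECTION	continue in another page of entries
 *	IND_DONE			end of the list
 */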

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;

	/*
	 * Verify we have good destination addresses. The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM. This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned. Too many
	 * special cases crop up when we don't do this. The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/* Ensure our buffer sizes do not exceed
	 * our memory sizes. This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > totalram_pages / 2)
		return -EINVAL;

	/*
	 * Verify we have good destination addresses. Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM. But crash kernels are preloaded into a
	 * reserved area of RAM. We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

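/*
 * Allocate 2^order pages for kexec and flag them as reserved. If
 * __GFP_ZERO was requested, the pages are cleared only after
 * arch_kexec_post_alloc_pages() has had a chance to act on them, which
 * is why __GFP_ZERO is masked off for the actual allocation.
 */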
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_boot_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address. Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel. All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}

	return pages;
}

struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump, allocate one vmcoreinfo safe copy from the
	 * crash memory. Since arch_kexec_protect_crashkres() runs
	 * after the kexec syscall, the copy is naturally protected
	 * from write (even read) access under the kernel direct
	 * mapping. But we still need to write to it when a crash
	 * happens, in order to generate the vmcoreinfo note, so we
	 * rely on vmap for this purpose.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}

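/*
 * Append one entry to the image's flattened entry list, chaining in a
 * fresh indirection page (via kimage_alloc_page()) whenever the current
 * page runs out of slots, and keeping the list zero-terminated.
 */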
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);
}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

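/*
 * Walk the flattened entry list: "ptr" points at the current slot and
 * "entry" holds its value; IND_INDIRECTION entries redirect the walk
 * into the next indirection page, and IND_DONE terminates it.
 */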
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated. This might be reached
	 * if an error occurred long after the buffers were allocated.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

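/*
 * Find the entry slot of the source page that is already bound for
 * destination @page, if any, so that kimage_alloc_page() can swap the
 * two pages around.
 */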
static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but it makes the
	 * proof that no problems will occur trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time. If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page. And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

/*
 * No panic_cpu check version of crash_kexec(). This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient. But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic(). Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other. To exclude them, we use panic_cpu here too.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		printk_safe_flush_on_panic();
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away. ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based. vmalloc doesn't guarantee that 2 contiguous
	 * vmalloc pages are also on 2 contiguous physical pages. In that
	 * case the 2nd part of crash_notes in the 2nd page could be lost,
	 * since only the starting address and size of crash_notes are
	 * exported through sysfs. Here round up the size of crash_notes to
	 * the nearest power of two and pass it to __alloc_percpu as the
	 * align value. This makes sure crash_notes is allocated inside one
	 * physical page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
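	/*
	 * For example (hypothetical size): a 424-byte note_buf_t would be
	 * rounded up to a 512-byte alignment, so the buffer can never
	 * straddle a page boundary.
	 */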

	/*
	 * Break the build if size is bigger than PAGE_SIZE, since crash_notes
	 * would then definitely span 2 pages.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);


/*
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end(). We *must* call
		 * dpm_suspend_end() now. Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}

/*
 * Protection mechanism for crashkernel reserved memory after
 * the kdump kernel is loaded.
 *
 * Provide an empty default implementation here -- architecture
 * code may override this.
 */
void __weak arch_kexec_protect_crashkres(void)
{}

void __weak arch_kexec_unprotect_crashkres(void)
{}
