Lines matching refs: image (kernel/kexec_core.c)
146 static struct page *kimage_alloc_page(struct kimage *image,
150 int sanity_check_segment_list(struct kimage *image) in sanity_check_segment_list() argument
153 unsigned long nr_segments = image->nr_segments; in sanity_check_segment_list()
172 mstart = image->segment[i].mem; in sanity_check_segment_list()
173 mend = mstart + image->segment[i].memsz; in sanity_check_segment_list()
191 mstart = image->segment[i].mem; in sanity_check_segment_list()
192 mend = mstart + image->segment[i].memsz; in sanity_check_segment_list()
196 pstart = image->segment[j].mem; in sanity_check_segment_list()
197 pend = pstart + image->segment[j].memsz; in sanity_check_segment_list()
210 if (image->segment[i].bufsz > image->segment[i].memsz) in sanity_check_segment_list()
220 if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2) in sanity_check_segment_list()
223 total_pages += PAGE_COUNT(image->segment[i].memsz); in sanity_check_segment_list()
239 if (image->type == KEXEC_TYPE_CRASH) { in sanity_check_segment_list()
243 mstart = image->segment[i].mem; in sanity_check_segment_list()
244 mend = mstart + image->segment[i].memsz - 1; in sanity_check_segment_list()
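The matches above trace sanity_check_segment_list()'s passes: destination ranges must not overlap one another (lines 191-197), a segment's load buffer may not exceed its destination (line 210), the page total is capped at half of RAM (lines 220-223), and crash images must additionally fall inside the reserved crash region, hence the inclusive end on line 244. A minimal userspace sketch of the overlap and size checks, using simplified stand-in fields rather than the kernel's struct kexec_segment:

    #include <stddef.h>

    struct seg { unsigned long mem, memsz, bufsz; };

    static int segments_sane(const struct seg *s, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            unsigned long mstart = s[i].mem;
            unsigned long mend = mstart + s[i].memsz;

            /* No destination range may overlap an earlier one. */
            for (size_t j = 0; j < i; j++) {
                unsigned long pstart = s[j].mem;
                unsigned long pend = pstart + s[j].memsz;

                if (mend > pstart && mstart < pend)
                    return 0;
            }
            /* The bytes copied in may not exceed the destination. */
            if (s[i].bufsz > s[i].memsz)
                return 0;
        }
        return 1;
    }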
257 struct kimage *image; in do_kimage_alloc_init() local
260 image = kzalloc(sizeof(*image), GFP_KERNEL); in do_kimage_alloc_init()
261 if (!image) in do_kimage_alloc_init()
264 image->head = 0; in do_kimage_alloc_init()
265 image->entry = &image->head; in do_kimage_alloc_init()
266 image->last_entry = &image->head; in do_kimage_alloc_init()
267 image->control_page = ~0; /* By default this does not apply */ in do_kimage_alloc_init()
268 image->type = KEXEC_TYPE_DEFAULT; in do_kimage_alloc_init()
271 INIT_LIST_HEAD(&image->control_pages); in do_kimage_alloc_init()
274 INIT_LIST_HEAD(&image->dest_pages); in do_kimage_alloc_init()
277 INIT_LIST_HEAD(&image->unusable_pages); in do_kimage_alloc_init()
279 return image; in do_kimage_alloc_init()
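The matched lines amount to nearly the whole of do_kimage_alloc_init(); reassembled, with the NULL return on allocation failure filled in as an assumption:

    image = kzalloc(sizeof(*image), GFP_KERNEL);
    if (!image)
        return NULL;

    image->head = 0;
    image->entry = &image->head;
    image->last_entry = &image->head;
    image->control_page = ~0;    /* By default this does not apply */
    image->type = KEXEC_TYPE_DEFAULT;

    INIT_LIST_HEAD(&image->control_pages);
    INIT_LIST_HEAD(&image->dest_pages);
    INIT_LIST_HEAD(&image->unusable_pages);

    return image;

Because entry and last_entry both start at &image->head, the very first kimage_add_entry() call (line 534 below) immediately takes the indirection-page allocation path.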
282 int kimage_is_destination_range(struct kimage *image, in kimage_is_destination_range() argument
288 for (i = 0; i < image->nr_segments; i++) { in kimage_is_destination_range()
291 mstart = image->segment[i].mem; in kimage_is_destination_range()
292 mend = mstart + image->segment[i].memsz; in kimage_is_destination_range()
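kimage_is_destination_range() is a half-open interval test over every segment. Reassembled from the matched lines, with the comparison itself (not shown by the matcher) filled in the same form as the overlap test on lines 191-197:

    for (i = 0; i < image->nr_segments; i++) {
        unsigned long mstart = image->segment[i].mem;
        unsigned long mend = mstart + image->segment[i].memsz;

        if ((end > mstart) && (start < mend))
            return 1;
    }
    return 0;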
349 static struct page *kimage_alloc_normal_control_pages(struct kimage *image, in kimage_alloc_normal_control_pages() argument
386 kimage_is_destination_range(image, addr, eaddr)) { in kimage_alloc_normal_control_pages()
394 list_add(&pages->lru, &image->control_pages); in kimage_alloc_normal_control_pages()
415 static struct page *kimage_alloc_crash_control_pages(struct kimage *image, in kimage_alloc_crash_control_pages() argument
444 hole_start = (image->control_page + (size - 1)) & ~(size - 1); in kimage_alloc_crash_control_pages()
454 for (i = 0; i < image->nr_segments; i++) { in kimage_alloc_crash_control_pages()
457 mstart = image->segment[i].mem; in kimage_alloc_crash_control_pages()
458 mend = mstart + image->segment[i].memsz - 1; in kimage_alloc_crash_control_pages()
467 if (i == image->nr_segments) { in kimage_alloc_crash_control_pages()
469 image->control_page = hole_end; in kimage_alloc_crash_control_pages()
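kimage_alloc_crash_control_pages() carves pages out of the reserved crash region itself, scanning forward from image->control_page for a hole no segment overlaps; once the scan clears every segment (i == image->nr_segments, line 467) the hole is taken and the cursor advances past it (line 469), so the next allocation searches beyond it. The mask arithmetic on line 444 rounds the cursor up to the next multiple of the power-of-two allocation size:

    /* Round-up as on line 444. E.g. for order-2 pages on a 4 KiB page
     * system, size == 0x4000 and a cursor of 0x12345 gives:
     *   (0x12345 + 0x3fff) & ~0x3fffUL == 0x14000                    */
    static unsigned long align_up(unsigned long x, unsigned long size)
    {
        return (x + (size - 1)) & ~(size - 1);
    }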
478 struct page *kimage_alloc_control_pages(struct kimage *image, in kimage_alloc_control_pages() argument
483 switch (image->type) { in kimage_alloc_control_pages()
485 pages = kimage_alloc_normal_control_pages(image, order); in kimage_alloc_control_pages()
488 pages = kimage_alloc_crash_control_pages(image, order); in kimage_alloc_control_pages()
495 int kimage_crash_copy_vmcoreinfo(struct kimage *image) in kimage_crash_copy_vmcoreinfo() argument
500 if (image->type != KEXEC_TYPE_CRASH) in kimage_crash_copy_vmcoreinfo()
512 vmcoreinfo_page = kimage_alloc_control_pages(image, 0); in kimage_crash_copy_vmcoreinfo()
523 image->vmcoreinfo_data_copy = safecopy; in kimage_crash_copy_vmcoreinfo()
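For crash images, kimage_crash_copy_vmcoreinfo() stashes a safe copy of the vmcoreinfo data inside the protected crash region. Since arch_kexec_protect_crashkres() later revokes direct-map access to that region, the copy is reached through a vmap() alias. A sketch of the path the matched lines outline; the vmap() call and the error returns are recalled rather than shown, so treat them as assumptions:

    if (image->type != KEXEC_TYPE_CRASH)
        return 0;

    vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
    if (!vmcoreinfo_page)
        return -ENOMEM;

    /* Reach the soon-to-be-protected page via a writable vmap alias. */
    safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
    if (!safecopy)
        return -ENOMEM;

    image->vmcoreinfo_data_copy = safecopy;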
529 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) in kimage_add_entry() argument
531 if (*image->entry != 0) in kimage_add_entry()
532 image->entry++; in kimage_add_entry()
534 if (image->entry == image->last_entry) { in kimage_add_entry()
538 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); in kimage_add_entry()
543 *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION; in kimage_add_entry()
544 image->entry = ind_page; in kimage_add_entry()
545 image->last_entry = ind_page + in kimage_add_entry()
548 *image->entry = entry; in kimage_add_entry()
549 image->entry++; in kimage_add_entry()
550 *image->entry = 0; in kimage_add_entry()
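Entries are physical addresses OR'd with an IND_* flag, and the cursor image->entry always points at a zero terminator. When the cursor reaches last_entry, the final slot of the current indirection page, a fresh page is chained in via IND_INDIRECTION and the cursor moves there. Reassembled, with the lines the matcher skipped (page_address() and the -ENOMEM return) filled in as assumptions:

    static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
    {
        if (*image->entry != 0)
            image->entry++;

        if (image->entry == image->last_entry) {
            kimage_entry_t *ind_page;
            struct page *page;

            page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
            if (!page)
                return -ENOMEM;

            ind_page = page_address(page);
            *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
            image->entry = ind_page;
            image->last_entry = ind_page +
                    ((PAGE_SIZE / sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;

        return 0;
    }

Keeping the last slot in reserve (the "- 1" on line 545) guarantees there is always room for the IND_INDIRECTION link to the next page.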
555 static int kimage_set_destination(struct kimage *image, in kimage_set_destination() argument
561 result = kimage_add_entry(image, destination | IND_DESTINATION); in kimage_set_destination()
567 static int kimage_add_page(struct kimage *image, unsigned long page) in kimage_add_page() argument
572 result = kimage_add_entry(image, page | IND_SOURCE); in kimage_add_page()
578 static void kimage_free_extra_pages(struct kimage *image) in kimage_free_extra_pages() argument
581 kimage_free_page_list(&image->dest_pages); in kimage_free_extra_pages()
584 kimage_free_page_list(&image->unusable_pages); in kimage_free_extra_pages()
587 void kimage_terminate(struct kimage *image) in kimage_terminate() argument
589 if (*image->entry != 0) in kimage_terminate()
590 image->entry++; in kimage_terminate()
592 *image->entry = IND_DONE; in kimage_terminate()
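The three matched lines are the entire body of kimage_terminate(): step past a live entry, then overwrite the trailing terminator with IND_DONE, which the iterator below treats as end-of-list:

    void kimage_terminate(struct kimage *image)
    {
        if (*image->entry != 0)
            image->entry++;

        *image->entry = IND_DONE;
    }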
595 #define for_each_kimage_entry(image, ptr, entry) \ argument
596 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
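The matcher cuts off the macro's continuation lines. The full iterator advances one slot at a time, follows IND_INDIRECTION links into the next page, and stops on a null entry or IND_DONE; reconstructed from memory (the boot_phys_to_virt() helper name in particular is an assumption):

    #define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
            ptr = (entry & IND_INDIRECTION) ? \
                boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)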
608 void kimage_free(struct kimage *image) in kimage_free() argument
613 if (!image) in kimage_free()
616 if (image->vmcoreinfo_data_copy) { in kimage_free()
618 vunmap(image->vmcoreinfo_data_copy); in kimage_free()
621 kimage_free_extra_pages(image); in kimage_free()
622 for_each_kimage_entry(image, ptr, entry) { in kimage_free()
639 machine_kexec_cleanup(image); in kimage_free()
642 kimage_free_page_list(&image->control_pages); in kimage_free()
648 if (image->file_mode) in kimage_free()
649 kimage_file_post_load_cleanup(image); in kimage_free()
651 kfree(image); in kimage_free()
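The entry walk in kimage_free() (line 622) frees IND_SOURCE pages as it goes, but it must hold back each IND_INDIRECTION page until the iterator has stepped off it, because ptr points into that very page. A sketch of the loop body; the ind variable and the kimage_free_entry() helper are recalled rather than shown:

    kimage_entry_t ind = 0;

    for_each_kimage_entry(image, ptr, entry) {
        if (entry & IND_INDIRECTION) {
            /* Free the previous indirection page... */
            if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);
            /* ...but keep this one until we have walked off it. */
            ind = entry;
        } else if (entry & IND_SOURCE)
            kimage_free_entry(entry);
    }
    /* Free the final indirection page. */
    if (ind & IND_INDIRECTION)
        kimage_free_entry(ind);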
654 static kimage_entry_t *kimage_dst_used(struct kimage *image, in kimage_dst_used() argument
660 for_each_kimage_entry(image, ptr, entry) { in kimage_dst_used()
673 static struct page *kimage_alloc_page(struct kimage *image, in kimage_alloc_page() argument
702 list_for_each_entry(page, &image->dest_pages, lru) { in kimage_alloc_page()
720 list_add(&page->lru, &image->unusable_pages); in kimage_alloc_page()
730 if (!kimage_is_destination_range(image, addr, in kimage_alloc_page()
739 old = kimage_dst_used(image, addr); in kimage_alloc_page()
764 list_add(&page->lru, &image->dest_pages); in kimage_alloc_page()
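kimage_alloc_page() must hand back a staging page that does not itself sit inside any destination range, or the final copy pass would clobber it. The matched lines show the moving parts: reuse a cached page from dest_pages (line 702), park pages unusable at boot time on unusable_pages (line 720), otherwise allocate and test. A skeleton of the retry loop; the control flow is condensed and the contents swap with the colliding holder (line 739's old) is elided, so treat the details as assumptions:

    do {
        page = kimage_alloc_pages(gfp_mask, 0);
        if (!page)
            return NULL;
        addr = page_to_boot_pfn(page) << PAGE_SHIFT;

        /* Not inside any destination range: safe to use as-is. */
        if (!kimage_is_destination_range(image, addr, addr + PAGE_SIZE))
            break;

        old = kimage_dst_used(image, addr);
        if (old) {
            /* addr is the destination of a source page already
             * loaded: swap the two pages so both end up usable. */
            break;
        }

        /* addr is a destination we have not filled yet: cache the
         * page on dest_pages for later reuse, and try again. */
        list_add(&page->lru, &image->dest_pages);
    } while (1);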
770 static int kimage_load_normal_segment(struct kimage *image, in kimage_load_normal_segment() argument
780 if (image->file_mode) in kimage_load_normal_segment()
788 result = kimage_set_destination(image, maddr); in kimage_load_normal_segment()
797 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); in kimage_load_normal_segment()
802 result = kimage_add_page(image, page_to_boot_pfn(page) in kimage_load_normal_segment()
816 if (image->file_mode) in kimage_load_normal_segment()
827 if (image->file_mode) in kimage_load_normal_segment()
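kimage_load_normal_segment() records one destination entry, then copies the segment in page-sized chunks; image->file_mode selects a plain memcpy() from the kernel buffer (kexec_file_load) versus copy_from_user() (kexec_load), which is all lines 780, 816 and 827 distinguish. A condensed sketch with the page-offset and zero-fill handling simplified away; the kbuf/buf names and the kmap() choice are assumptions:

    result = kimage_set_destination(image, maddr);
    while (mbytes && !result) {
        struct page *page;
        char *ptr;
        size_t uchunk, mchunk;

        page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
        if (!page)
            return -ENOMEM;
        result = kimage_add_page(image,
                     page_to_boot_pfn(page) << PAGE_SHIFT);
        if (result < 0)
            return result;

        ptr = kmap(page);
        mchunk = min_t(size_t, mbytes, PAGE_SIZE);
        uchunk = min(ubytes, mchunk);

        if (image->file_mode)                       /* kexec_file_load */
            memcpy(ptr, kbuf, uchunk);
        else if (copy_from_user(ptr, buf, uchunk))  /* kexec_load */
            result = -EFAULT;
        kunmap(page);

        if (image->file_mode)
            kbuf += uchunk;
        else
            buf += uchunk;
        ubytes -= uchunk;
        maddr  += mchunk;
        mbytes -= mchunk;
    }
    return result;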
839 static int kimage_load_crash_segment(struct kimage *image, in kimage_load_crash_segment() argument
853 if (image->file_mode) in kimage_load_crash_segment()
881 if (image->file_mode) in kimage_load_crash_segment()
893 if (image->file_mode) in kimage_load_crash_segment()
905 int kimage_load_segment(struct kimage *image, in kimage_load_segment() argument
910 switch (image->type) { in kimage_load_segment()
912 result = kimage_load_normal_segment(image, segment); in kimage_load_segment()
915 result = kimage_load_crash_segment(image, segment); in kimage_load_segment()
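kimage_load_segment() simply dispatches on the image type; the crash variant (lines 853-893) copies straight into the already-reserved crash region, so it needs no entry list and differs from the normal path mainly in the same file_mode source selection. Reassembled, with the default result and the break statements filled in as assumptions:

    int kimage_load_segment(struct kimage *image,
                struct kexec_segment *segment)
    {
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
            result = kimage_load_normal_segment(image, segment);
            break;
        case KEXEC_TYPE_CRASH:
            result = kimage_load_crash_segment(image, segment);
            break;
        }

        return result;
    }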