Lines Matching +full:segment +full:- +full:no +full:- +full:remap

1 // SPDX-License-Identifier: GPL-2.0-only
74 INIT_LIST_HEAD(&cb->next); in register_vmcore_cb()
76 list_add_tail(&cb->next, &vmcore_cb_list); in register_vmcore_cb()
90 list_del_rcu(&cb->next); in unregister_vmcore_cb()
111 if (unlikely(!cb->pfn_is_ram)) in pfn_is_ram()
113 ret = cb->pfn_is_ram(cb, pfn); in pfn_is_ram()
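The pfn_is_ram() hook above consults registered callbacks so a driver (virtio-mem is the in-tree user) can declare pages of the old kernel as not-RAM and keep them out of the dump. A minimal sketch of registering such a callback through the register_vmcore_cb()/unregister_vmcore_cb() API shown above; my_driver_owns_pfn() is a placeholder predicate, not a real kernel function:

    #include <linux/crash_dump.h>
    #include <linux/module.h>

    /* Report every pfn as RAM except those this (hypothetical) driver owns. */
    static bool my_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
    {
            return !my_driver_owns_pfn(pfn);        /* placeholder predicate */
    }

    static struct vmcore_cb my_vmcore_cb = {
            .pfn_is_ram = my_pfn_is_ram,
    };

    static int __init my_init(void)
    {
            register_vmcore_cb(&my_vmcore_cb);      /* adds to vmcore_cb_list */
            return 0;
    }

    static void __exit my_exit(void)
    {
            unregister_vmcore_cb(&my_vmcore_cb);    /* RCU list removal, as above */
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");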
147 if (count > (PAGE_SIZE - offset)) in read_from_oldmem()
148 nr_bytes = PAGE_SIZE - offset; in read_from_oldmem()
166 return -EFAULT; in read_from_oldmem()
170 count -= nr_bytes; in read_from_oldmem()
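read_from_oldmem() never copies across a page boundary: each iteration clamps nr_bytes to PAGE_SIZE - offset, then later pages start at offset zero. A standalone sketch of just that chunking arithmetic, with the actual copy abstracted behind a hypothetical emit() callback:

    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    /* Visit [pfn:offset, +count) one page-bounded chunk at a time,
     * mirroring the loop in read_from_oldmem(). */
    static size_t for_each_page_chunk(unsigned long pfn, unsigned long offset,
                                      size_t count,
                                      void (*emit)(unsigned long pfn,
                                                   unsigned long offset,
                                                   size_t nr_bytes))
    {
            size_t done = 0;

            while (count) {
                    size_t nr_bytes = count;

                    if (nr_bytes > PAGE_SIZE - offset)      /* clamp to page end */
                            nr_bytes = PAGE_SIZE - offset;
                    emit(pfn, offset, nr_bytes);
                    done += nr_bytes;
                    count -= nr_bytes;
                    offset = 0;             /* subsequent pages start at 0 */
                    pfn++;
            }
            return done;
    }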
252 if (start < offset + dump->size) { in vmcoredd_copy_dumps()
253 tsz = min(offset + (u64)dump->size - start, (u64)size); in vmcoredd_copy_dumps()
254 buf = dump->buf + start - offset; in vmcoredd_copy_dumps()
256 ret = -EFAULT; in vmcoredd_copy_dumps()
260 size -= tsz; in vmcoredd_copy_dumps()
267 offset += dump->size; in vmcoredd_copy_dumps()
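vmcoredd_copy_dumps() and vmcoredd_mmap_dumps() above share one intersection rule: a dump occupying [offset, offset + size) contributes min(offset + size - start, remaining) bytes once the read window reaches it, then offset advances by the dump's size. The clamp in isolation, as a small self-checking C program (the segment sizes are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Bytes a segment at [offset, offset + seg_size) contributes to a window
     * starting at 'start' with 'size' bytes left -- the tsz computation in
     * vmcoredd_copy_dumps(). Assumes start >= offset, as the caller's
     * in-order walk guarantees. */
    static uint64_t seg_bytes(uint64_t offset, uint64_t seg_size,
                              uint64_t start, uint64_t size)
    {
            uint64_t tsz;

            if (start >= offset + seg_size)
                    return 0;               /* window begins past this segment */
            tsz = offset + seg_size - start;
            return tsz < size ? tsz : size;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)seg_bytes(0x000, 0x100, 0x0F0, 0x40)); /* 0x10 */
            printf("%llu\n", (unsigned long long)seg_bytes(0x100, 0x080, 0x110, 0x40)); /* 0x40 */
            return 0;
    }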
287 if (start < offset + dump->size) { in vmcoredd_mmap_dumps()
288 tsz = min(offset + (u64)dump->size - start, (u64)size); in vmcoredd_mmap_dumps()
289 buf = dump->buf + start - offset; in vmcoredd_mmap_dumps()
292 ret = -EFAULT; in vmcoredd_mmap_dumps()
296 size -= tsz; in vmcoredd_mmap_dumps()
304 offset += dump->size; in vmcoredd_mmap_dumps()
327 iov_iter_truncate(iter, vmcore_size - *fpos); in __read_vmcore()
331 tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter)); in __read_vmcore()
333 return -EFAULT; in __read_vmcore()
342 /* Read Elf note segment */ in __read_vmcore()
348 * completely and we will end up with zero-filled data in __read_vmcore()
350 * then try to decode this zero-filled data as valid notes in __read_vmcore()
352 * the other elf notes ensure that zero-filled data can be in __read_vmcore()
358 tsz = min(elfcorebuf_sz + vmcoredd_orig_sz - in __read_vmcore()
360 start = *fpos - elfcorebuf_sz; in __read_vmcore()
362 return -EFAULT; in __read_vmcore()
374 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, in __read_vmcore()
376 kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz; in __read_vmcore()
378 return -EFAULT; in __read_vmcore()
389 if (*fpos < m->offset + m->size) { in __read_vmcore()
391 m->offset + m->size - *fpos, in __read_vmcore()
393 start = m->paddr + *fpos - m->offset; in __read_vmcore()
412 return __read_vmcore(iter, &iocb->ki_pos); in read_vmcore()
425 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in mmap_vmcore_fault()
426 pgoff_t index = vmf->pgoff; in mmap_vmcore_fault()
451 vmf->page = page; in mmap_vmcore_fault()
463 * vmcore_alloc_buf - allocate buffer in vmalloc memory
467 * the buffer to user-space by means of remap_vmalloc_range().
470 * disabled and there's no need to allow users to mmap the buffer.
484 * non-contiguous objects (ELF header, ELF note segment and memory
486 * virtually contiguous user-space in ELF layout.
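On MMU systems the comment above describes the vmalloc_user()/remap_vmalloc_range() pairing: vmalloc_user() returns zeroed, page-aligned memory suitable for user mapping, which an mmap handler can then hand out wholesale. A minimal sketch of that pattern; the char-device wiring around it is assumed, not shown:

    #include <linux/vmalloc.h>
    #include <linux/mm.h>
    #include <linux/fs.h>

    static void *my_buf;            /* allocated once, e.g. at init time */
    static size_t my_buf_sz;

    static int my_setup(size_t size)
    {
            my_buf = vmalloc_user(size);    /* zeroed, safe for remap below */
            if (!my_buf)
                    return -ENOMEM;
            my_buf_sz = size;
            return 0;
    }

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            size_t len = vma->vm_end - vma->vm_start;

            if (len > my_buf_sz)
                    return -EINVAL;
            return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
    }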
490 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
499 * Returns zero on success, -EAGAIN on failure.
516 * We hit a page which is not ram. Remap the contiguous in remap_oldmem_pfn_checked()
517 * region between pos_start and pos-1 and replace in remap_oldmem_pfn_checked()
518 * the non-ram page at pos with the zero page. in remap_oldmem_pfn_checked()
521 /* Remap contiguous region */ in remap_oldmem_pfn_checked()
522 map_size = (pos - pos_start) << PAGE_SHIFT; in remap_oldmem_pfn_checked()
529 /* Remap the zero page */ in remap_oldmem_pfn_checked()
539 /* Remap the rest */ in remap_oldmem_pfn_checked()
540 map_size = (pos - pos_start) << PAGE_SHIFT; in remap_oldmem_pfn_checked()
547 do_munmap(vma->vm_mm, from, len, NULL); in remap_oldmem_pfn_checked()
548 return -EAGAIN; in remap_oldmem_pfn_checked()
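The function above splits the mapping at every non-RAM page: the contiguous run before the hole is flushed, the hole gets the zero page, and on any failure everything mapped so far is torn down with do_munmap(). A simplified model of that run-splitting in plain C, with the predicate and both map operations left as hypothetical callbacks:

    /* Split [from_pfn, from_pfn + nr) into RAM runs and zero-page holes,
     * following the control flow of remap_oldmem_pfn_checked(). */
    static int map_checked(unsigned long from_pfn, unsigned long nr,
                           int (*is_ram)(unsigned long pfn),
                           int (*map_run)(unsigned long pfn, unsigned long nr),
                           int (*map_zero_page)(void))
    {
            unsigned long pos, pos_start = from_pfn, end = from_pfn + nr;

            for (pos = from_pfn; pos < end; pos++) {
                    if (is_ram(pos))
                            continue;
                    /* Flush the run [pos_start, pos - 1], if any. */
                    if (pos > pos_start && map_run(pos_start, pos - pos_start))
                            return -1;      /* caller unmaps, as vmcore does */
                    if (map_zero_page())
                            return -1;
                    pos_start = pos + 1;    /* run resumes after the hole */
            }
            /* Remap the rest. */
            if (pos > pos_start && map_run(pos_start, pos - pos_start))
                    return -1;
            return 0;
    }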
572 size_t size = vma->vm_end - vma->vm_start; in mmap_vmcore()
576 start = (u64)vma->vm_pgoff << PAGE_SHIFT; in mmap_vmcore()
580 return -EINVAL; in mmap_vmcore()
582 if (vma->vm_flags & (VM_WRITE | VM_EXEC)) in mmap_vmcore()
583 return -EPERM; in mmap_vmcore()
585 vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC); in mmap_vmcore()
586 vma->vm_flags |= VM_MIXEDMAP; in mmap_vmcore()
587 vma->vm_ops = &vmcore_mmap_ops; in mmap_vmcore()
594 tsz = min(elfcorebuf_sz - (size_t)start, size); in mmap_vmcore()
596 if (remap_pfn_range(vma, vma->vm_start, pfn, tsz, in mmap_vmcore()
597 vma->vm_page_prot)) in mmap_vmcore()
598 return -EAGAIN; in mmap_vmcore()
599 size -= tsz; in mmap_vmcore()
612 * completely and we will end up with zero-filled data in mmap_vmcore()
614 * then try to decode this zero-filled data as valid notes in mmap_vmcore()
616 * the other elf notes ensure that zero-filled data can be in mmap_vmcore()
626 tsz = min(elfcorebuf_sz + vmcoredd_orig_sz - in mmap_vmcore()
628 start_off = start - elfcorebuf_sz; in mmap_vmcore()
629 if (vmcoredd_mmap_dumps(vma, vma->vm_start + len, in mmap_vmcore()
633 size -= tsz; in mmap_vmcore()
644 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size); in mmap_vmcore()
645 kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz; in mmap_vmcore()
646 if (remap_vmalloc_range_partial(vma, vma->vm_start + len, in mmap_vmcore()
650 size -= tsz; in mmap_vmcore()
659 if (start < m->offset + m->size) { in mmap_vmcore()
663 m->offset + m->size - start, size); in mmap_vmcore()
664 paddr = m->paddr + start - m->offset; in mmap_vmcore()
665 if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len, in mmap_vmcore()
667 vma->vm_page_prot)) in mmap_vmcore()
669 size -= tsz; in mmap_vmcore()
680 do_munmap(vma->vm_mm, vma->vm_start, len, NULL); in mmap_vmcore()
681 return -EAGAIN; in mmap_vmcore()
686 return -ENOSYS; in mmap_vmcore()
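Seen from userspace, /proc/vmcore is an ordinary ELF core file, and the handler above only accepts read-only mappings (VM_WRITE or VM_EXEC earns -EPERM). A short sketch that maps the first page and prints the ELF identification; error handling is abbreviated:

    #include <elf.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/proc/vmcore", O_RDONLY);
            Elf64_Ehdr eh;
            void *p;

            if (fd < 0)
                    return 1;
            /* PROT_READ only: mmap_vmcore() rejects writable/executable maps. */
            p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
            if (p == MAP_FAILED)
                    return 1;
            memcpy(&eh, p, sizeof(eh));
            printf("class=%d type=%d phnum=%d\n",
                   eh.e_ident[EI_CLASS], eh.e_type, eh.e_phnum);
            munmap(p, 4096);
            close(fd);
            return 0;
    }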
710 size += m->size; in get_vmcore_size()
716 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
722 * note segment.
731 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in update_note_header_size_elf64()
734 if (phdr_ptr->p_type != PT_NOTE) in update_note_header_size_elf64()
736 max_sz = phdr_ptr->p_memsz; in update_note_header_size_elf64()
737 offset = phdr_ptr->p_offset; in update_note_header_size_elf64()
740 return -ENOMEM; in update_note_header_size_elf64()
747 while (nhdr_ptr->n_namesz != 0) { in update_note_header_size_elf64()
749 (((u64)nhdr_ptr->n_namesz + 3) & ~3) + in update_note_header_size_elf64()
750 (((u64)nhdr_ptr->n_descsz + 3) & ~3); in update_note_header_size_elf64()
753 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz); in update_note_header_size_elf64()
760 phdr_ptr->p_memsz = real_sz; in update_note_header_size_elf64()
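The loop above depends on the on-disk note format: each entry is an Elf64_Nhdr followed by its name and descriptor, each rounded up to 4 bytes, and a zeroed n_namesz terminates the walk. That is also why the zero-filled tail mentioned in the __read_vmcore() comment parses harmlessly as padding. The same walk as a standalone helper:

    #include <elf.h>
    #include <stdint.h>

    /* Sum the real size of the notes in buf, stopping at a zeroed header or
     * at max_sz, with the 4-byte rounding used by
     * update_note_header_size_elf64(). */
    static uint64_t notes_real_size(const void *buf, uint64_t max_sz)
    {
            const Elf64_Nhdr *nhdr = buf;
            uint64_t real_sz = 0;

            while (nhdr->n_namesz != 0) {
                    uint64_t sz = sizeof(*nhdr) +
                            (((uint64_t)nhdr->n_namesz + 3) & ~3ULL) +
                            (((uint64_t)nhdr->n_descsz + 3) & ~3ULL);

                    if (real_sz + sz > max_sz)      /* corrupted note: stop */
                            break;
                    real_sz += sz;
                    nhdr = (const Elf64_Nhdr *)((const char *)nhdr + sz);
            }
            return real_sz;
    }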
770 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
771 * headers and sum of real size of their ELF note segment headers and
780 * @sz_ptnote in its phdr->p_memsz.
784 * and each of PT_NOTE program headers has actual ELF note segment
796 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in get_note_number_and_size_elf64()
797 if (phdr_ptr->p_type != PT_NOTE) in get_note_number_and_size_elf64()
800 *sz_ptnote += phdr_ptr->p_memsz; in get_note_number_and_size_elf64()
807 * copy_notes_elf64 - copy ELF note segments in a given buffer
812 * This function is used to copy the ELF note segment in the 1st kernel
815 * real ELF note segment headers and data.
819 * and each of PT_NOTE program headers has actual ELF note segment
829 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in copy_notes_elf64()
831 if (phdr_ptr->p_type != PT_NOTE) in copy_notes_elf64()
833 offset = phdr_ptr->p_offset; in copy_notes_elf64()
834 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz, in copy_notes_elf64()
838 notes_buf += phdr_ptr->p_memsz; in copy_notes_elf64()
867 return -ENOMEM; in merge_note_headers_elf64()
877 (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr); in merge_note_headers_elf64()
889 i = (nr_ptnote - 1) * sizeof(Elf64_Phdr); in merge_note_headers_elf64()
890 *elfsz = *elfsz - i; in merge_note_headers_elf64()
891 memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr))); in merge_note_headers_elf64()
896 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; in merge_note_headers_elf64()
907 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
913 * note segment.
922 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in update_note_header_size_elf32()
925 if (phdr_ptr->p_type != PT_NOTE) in update_note_header_size_elf32()
927 max_sz = phdr_ptr->p_memsz; in update_note_header_size_elf32()
928 offset = phdr_ptr->p_offset; in update_note_header_size_elf32()
931 return -ENOMEM; in update_note_header_size_elf32()
938 while (nhdr_ptr->n_namesz != 0) { in update_note_header_size_elf32()
940 (((u64)nhdr_ptr->n_namesz + 3) & ~3) + in update_note_header_size_elf32()
941 (((u64)nhdr_ptr->n_descsz + 3) & ~3); in update_note_header_size_elf32()
944 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz); in update_note_header_size_elf32()
951 phdr_ptr->p_memsz = real_sz; in update_note_header_size_elf32()
961 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
962 * headers and sum of real size of their ELF note segment headers and
971 * @sz_ptnote in its phdr->p_memsz.
975 * and each of PT_NOTE program headers has actual ELF note segment
987 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in get_note_number_and_size_elf32()
988 if (phdr_ptr->p_type != PT_NOTE) in get_note_number_and_size_elf32()
991 *sz_ptnote += phdr_ptr->p_memsz; in get_note_number_and_size_elf32()
998 * copy_notes_elf32 - copy ELF note segments in a given buffer
1003 * This function is used to copy the ELF note segment in the 1st kernel
1006 * real ELF note segment headers and data.
1010 * and each of PT_NOTE program headers has actual ELF note segment
1020 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in copy_notes_elf32()
1022 if (phdr_ptr->p_type != PT_NOTE) in copy_notes_elf32()
1024 offset = phdr_ptr->p_offset; in copy_notes_elf32()
1025 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz, in copy_notes_elf32()
1029 notes_buf += phdr_ptr->p_memsz; in copy_notes_elf32()
1058 return -ENOMEM; in merge_note_headers_elf32()
1068 (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr); in merge_note_headers_elf32()
1080 i = (nr_ptnote - 1) * sizeof(Elf32_Phdr); in merge_note_headers_elf32()
1081 *elfsz = *elfsz - i; in merge_note_headers_elf32()
1082 memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr))); in merge_note_headers_elf32()
1087 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; in merge_note_headers_elf32()
1113 /* Skip Elf header, program headers and Elf note segment. */ in process_ptload_program_headers_elf64()
1116 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in process_ptload_program_headers_elf64()
1119 if (phdr_ptr->p_type != PT_LOAD) in process_ptload_program_headers_elf64()
1122 paddr = phdr_ptr->p_offset; in process_ptload_program_headers_elf64()
1124 end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); in process_ptload_program_headers_elf64()
1125 size = end - start; in process_ptload_program_headers_elf64()
1130 return -ENOMEM; in process_ptload_program_headers_elf64()
1131 new->paddr = start; in process_ptload_program_headers_elf64()
1132 new->size = size; in process_ptload_program_headers_elf64()
1133 list_add_tail(&new->list, vc_list); in process_ptload_program_headers_elf64()
1136 phdr_ptr->p_offset = vmcore_off + (paddr - start); in process_ptload_program_headers_elf64()
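Each PT_LOAD region above is widened to page boundaries before it gets a file offset, and p_offset keeps the sub-page displacement paddr - start so readers land on the right byte. The placement arithmetic on its own, with the rounding helpers written out (power-of-two alignment assumed):

    #include <stdint.h>

    #define PAGE_SZ          4096ULL
    #define ROUND_DOWN(x, a) ((x) & ~((a) - 1))
    #define ROUND_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    /* Give a PT_LOAD covering [paddr, paddr + memsz) a page-aligned slot in
     * the vmcore file, as process_ptload_program_headers_elf64() does.
     * Returns the advanced vmcore offset; *p_offset receives the header's
     * new file offset. */
    static uint64_t place_ptload(uint64_t paddr, uint64_t memsz,
                                 uint64_t vmcore_off, uint64_t *p_offset)
    {
            uint64_t start = ROUND_DOWN(paddr, PAGE_SZ);
            uint64_t end = ROUND_UP(paddr + memsz, PAGE_SZ);

            *p_offset = vmcore_off + (paddr - start);
            return vmcore_off + (end - start);      /* next region starts here */
    }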
1156 /* Skip Elf header, program headers and Elf note segment. */ in process_ptload_program_headers_elf32()
1159 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in process_ptload_program_headers_elf32()
1162 if (phdr_ptr->p_type != PT_LOAD) in process_ptload_program_headers_elf32()
1165 paddr = phdr_ptr->p_offset; in process_ptload_program_headers_elf32()
1167 end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); in process_ptload_program_headers_elf32()
1168 size = end - start; in process_ptload_program_headers_elf32()
1173 return -ENOMEM; in process_ptload_program_headers_elf32()
1174 new->paddr = start; in process_ptload_program_headers_elf32()
1175 new->size = size; in process_ptload_program_headers_elf32()
1176 list_add_tail(&new->list, vc_list); in process_ptload_program_headers_elf32()
1179 phdr_ptr->p_offset = vmcore_off + (paddr - start); in process_ptload_program_headers_elf32()
1192 /* Skip Elf header, program headers and Elf note segment. */ in set_vmcore_list_offsets()
1196 m->offset = vmcore_off; in set_vmcore_list_offsets()
1197 vmcore_off += m->size; in set_vmcore_list_offsets()
1233 return -EINVAL; in parse_crash_elf64_headers()
1243 return -ENOMEM; in parse_crash_elf64_headers()
1289 return -EINVAL; in parse_crash_elf32_headers()
1298 return -ENOMEM; in parse_crash_elf32_headers()
1332 return -EINVAL; in parse_crash_elf_headers()
1345 return -EINVAL; in parse_crash_elf_headers()
1357 * vmcoredd_write_header - Write vmcore device dump header at the
1370 vdd_hdr->n_namesz = sizeof(vdd_hdr->name); in vmcoredd_write_header()
1371 vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name); in vmcoredd_write_header()
1372 vdd_hdr->n_type = NT_VMCOREDD; in vmcoredd_write_header()
1374 strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME, in vmcoredd_write_header()
1375 sizeof(vdd_hdr->name)); in vmcoredd_write_header()
1376 memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name)); in vmcoredd_write_header()
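The header written above makes the device dump one ELF note: n_type is NT_VMCOREDD and n_descsz covers the fixed-size dump_name plus the payload, so the payload length is recoverable by subtraction. Assuming the UAPI layout from <linux/vmcore.h> (struct vmcoredd_header with those fields), a reader-side helper might look like this:

    #include <linux/vmcore.h>       /* struct vmcoredd_header, assumed layout */
    #include <stdio.h>

    /* Payload bytes following the header inside one NT_VMCOREDD note, per
     * the n_descsz convention set in vmcoredd_write_header(). */
    static unsigned int vmcoredd_payload_size(const struct vmcoredd_header *h)
    {
            return h->n_descsz - sizeof(h->dump_name);
    }

    static void vmcoredd_print(const struct vmcoredd_header *h)
    {
            printf("device dump '%.*s', %u payload bytes\n",
                   (int)sizeof(h->dump_name), (const char *)h->dump_name,
                   vmcoredd_payload_size(h));
    }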
1380 * vmcoredd_update_program_headers - Update all Elf program headers
1403 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { in vmcoredd_update_program_headers()
1404 if (phdr->p_type == PT_NOTE) { in vmcoredd_update_program_headers()
1406 phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz; in vmcoredd_update_program_headers()
1407 phdr->p_filesz = phdr->p_memsz; in vmcoredd_update_program_headers()
1411 start = rounddown(phdr->p_offset, PAGE_SIZE); in vmcoredd_update_program_headers()
1412 end = roundup(phdr->p_offset + phdr->p_memsz, in vmcoredd_update_program_headers()
1414 size = end - start; in vmcoredd_update_program_headers()
1415 phdr->p_offset = vmcore_off + (phdr->p_offset - start); in vmcoredd_update_program_headers()
1423 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { in vmcoredd_update_program_headers()
1424 if (phdr->p_type == PT_NOTE) { in vmcoredd_update_program_headers()
1426 phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz; in vmcoredd_update_program_headers()
1427 phdr->p_filesz = phdr->p_memsz; in vmcoredd_update_program_headers()
1431 start = rounddown(phdr->p_offset, PAGE_SIZE); in vmcoredd_update_program_headers()
1432 end = roundup(phdr->p_offset + phdr->p_memsz, in vmcoredd_update_program_headers()
1434 size = end - start; in vmcoredd_update_program_headers()
1435 phdr->p_offset = vmcore_off + (phdr->p_offset - start); in vmcoredd_update_program_headers()
1442 * vmcoredd_update_size - Update the total size of the device dumps and update
1462 proc_vmcore->size = vmcore_size; in vmcoredd_update_size()
1466 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
1482 return -EINVAL; in vmcore_add_device_dump()
1485 if (!data || !strlen(data->dump_name) || in vmcore_add_device_dump()
1486 !data->vmcoredd_callback || !data->size) in vmcore_add_device_dump()
1487 return -EINVAL; in vmcore_add_device_dump()
1491 ret = -ENOMEM; in vmcore_add_device_dump()
1496 data_size = roundup(sizeof(struct vmcoredd_header) + data->size, in vmcore_add_device_dump()
1502 ret = -ENOMEM; in vmcore_add_device_dump()
1506 vmcoredd_write_header(buf, data, data_size - in vmcore_add_device_dump()
1510 ret = data->vmcoredd_callback(data, buf + in vmcore_add_device_dump()
1515 dump->buf = buf; in vmcore_add_device_dump()
1516 dump->size = data_size; in vmcore_add_device_dump()
1520 list_add_tail(&dump->list, &vmcoredd_list); in vmcore_add_device_dump()
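A driver feeds vmcore_add_device_dump() a struct vmcoredd_data: a unique name, the payload size, and a callback that vmcore invokes to copy device state into the buffer it allocates after the header. A minimal registration sketch, assuming CONFIG_PROC_VMCORE_DEVICE_DUMP=y and a hypothetical my_hw_snapshot() collector:

    #include <linux/crash_dump.h>
    #include <linux/string.h>

    static int my_dump_collect(struct vmcoredd_data *data, void *buf)
    {
            /* Fill buf with up to data->size bytes of device state. */
            return my_hw_snapshot(buf, data->size); /* placeholder collector */
    }

    static int my_register_dump(unsigned int snapshot_size)
    {
            struct vmcoredd_data data = {};

            strscpy(data.dump_name, "mydev", sizeof(data.dump_name));
            data.size = snapshot_size;
            data.vmcoredd_callback = my_dump_collect;
            /* Copies the name/size and invokes the callback synchronously;
             * returns -EINVAL or -ENOMEM on failure, as seen above. */
            return vmcore_add_device_dump(&data);
    }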
1545 list_del(&dump->list); in vmcore_free_device_dumps()
1546 vfree(dump->buf); in vmcore_free_device_dumps()
1578 proc_vmcore->size = vmcore_size; in vmcore_init()
1596 list_del(&m->list); in vmcore_cleanup()