Lines Matching +full:vm +full:- +full:map

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Re-map IO memory to kernel address space so that we can access it.
17 * Because the ARM only has a 32-bit address space we can't address the
18 * whole of the (physical) PCI space at once. PCI huge-mode addressing
40 #include <asm/mach/map.h>
51 struct vm_struct *vm; in find_static_vm_paddr() local
54 vm = &svm->vm; in find_static_vm_paddr()
55 if (!(vm->flags & VM_ARM_STATIC_MAPPING)) in find_static_vm_paddr()
57 if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) in find_static_vm_paddr()
60 if (vm->phys_addr > paddr || in find_static_vm_paddr()
61 paddr + size - 1 > vm->phys_addr + vm->size - 1) in find_static_vm_paddr()
73 struct vm_struct *vm; in find_static_vm_vaddr() local
76 vm = &svm->vm; in find_static_vm_vaddr()
79 if (vm->addr > vaddr) in find_static_vm_vaddr()
82 if (vm->addr <= vaddr && vm->addr + vm->size > vaddr) in find_static_vm_vaddr()
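
The matched lines above appear to be the static-mapping lookup helpers from the 32-bit ARM ioremap code (arch/arm/mm/ioremap.c). Below is a minimal userspace sketch, under stated assumptions, of the two checks they perform: whether a static mapping with the right memory type covers a requested physical range, and whether a virtual address falls inside a mapping. The struct and helper names are illustrative, not the kernel's.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct static_map {
        uintptr_t vaddr;   /* virtual start of the mapping */
        uint64_t  paddr;   /* physical start of the mapping */
        size_t    size;    /* length in bytes */
        unsigned  mtype;   /* memory type the mapping was created with */
};

/* find_static_vm_paddr-style check: reuse only if the memory type matches
 * and the whole requested physical range lies inside the mapping. */
static bool covers_paddr(const struct static_map *m, uint64_t paddr,
                         size_t size, unsigned mtype)
{
        if (m->mtype != mtype)
                return false;
        return !(m->paddr > paddr ||
                 paddr + size - 1 > m->paddr + m->size - 1);
}

/* find_static_vm_vaddr-style check: does the address fall inside the
 * mapping's [start, start + size) range? */
static bool contains_vaddr(const struct static_map *m, uintptr_t addr)
{
        return m->vaddr <= addr && m->vaddr + m->size > addr;
}

int main(void)
{
        struct static_map m = {
                .vaddr = 0xf8000000, .paddr = 0x40000000,
                .size  = 0x100000,   .mtype = 0,
        };

        printf("%d %d %d\n",
               covers_paddr(&m, 0x40001000, 0x1000, 0),   /* 1: inside   */
               covers_paddr(&m, 0x400ff000, 0x2000, 0),   /* 0: overruns */
               contains_vaddr(&m, 0xf8080000));           /* 1: inside   */
        return 0;
}
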
92 struct vm_struct *vm; in add_static_vm_early() local
95 vm = &svm->vm; in add_static_vm_early()
96 vm_area_add_early(vm); in add_static_vm_early()
97 vaddr = vm->addr; in add_static_vm_early()
100 vm = &curr_svm->vm; in add_static_vm_early()
102 if (vm->addr > vaddr) in add_static_vm_early()
105 list_add_tail(&svm->list, &curr_svm->list); in add_static_vm_early()
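
add_static_vm_early() registers the area with vm_area_add_early() and then links the entry into a list kept sorted by virtual address, inserting in front of the first entry whose address is higher. A simplified userspace model of that ordered insert, using a plain singly linked list in place of the kernel's list_head (the names are invented for illustration):

#include <stdio.h>

struct svm {
        unsigned long addr;   /* virtual start address of the mapping */
        struct svm *next;
};

/* Insert before the first entry whose address is greater than ours,
 * so the list stays sorted by virtual address. */
static void add_sorted(struct svm **head, struct svm *new)
{
        struct svm **pp = head;

        while (*pp && (*pp)->addr <= new->addr)
                pp = &(*pp)->next;
        new->next = *pp;
        *pp = new;
}

int main(void)
{
        struct svm a = { .addr = 0xf0000000 };
        struct svm b = { .addr = 0xe0000000 };
        struct svm c = { .addr = 0xf8000000 };
        struct svm *head = NULL;

        add_sorted(&head, &a);
        add_sorted(&head, &b);
        add_sorted(&head, &c);

        for (struct svm *s = head; s; s = s->next)
                printf("%#lx\n", s->addr);   /* printed lowest to highest */
        return 0;
}
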
112 __pgprot(mtype->prot_pte)); in ioremap_page()
124 sizeof(pgd_t) * (pgd_index(VMALLOC_END) - in __check_vmalloc_seq()
126 mm->context.vmalloc_seq = seq; in __check_vmalloc_seq()
132 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
143 unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1)); in unmap_area_sections()
172 * Ensure that the active_mm is up to date - we want to in unmap_area_sections()
173 * catch any use-after-iounmap cases. in unmap_area_sections()
175 if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq) in unmap_area_sections()
176 __check_vmalloc_seq(current->active_mm); in unmap_area_sections()
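
__check_vmalloc_seq() lazily copies the top-level page-table entries covering the vmalloc region from init_mm into an mm whose sequence number has fallen behind, and unmap_area_sections() performs the comparison shown above so that the active mm is brought up to date and use-after-iounmap accesses fault. A toy model of the sequence-number idea, with the page table reduced to a small array; everything here is illustrative, not kernel API:

#include <stdio.h>
#include <string.h>

#define NENTRIES 4

struct table {
        unsigned int  seq;                /* last synchronised generation  */
        unsigned long entries[NENTRIES];  /* stand-in for the pgd entries  */
};

static struct table init_tbl;             /* plays the role of init_mm     */

static void check_seq(struct table *t)
{
        if (t->seq != init_tbl.seq) {     /* stale: pull in the new entries */
                memcpy(t->entries, init_tbl.entries, sizeof(t->entries));
                t->seq = init_tbl.seq;
        }
}

int main(void)
{
        struct table mine = { 0 };

        init_tbl.entries[0] = 0x1234;     /* the shared mapping changes ...   */
        init_tbl.seq++;                   /* ... and the generation is bumped */

        check_seq(&mine);                 /* the copy happens lazily, here    */
        printf("seq=%u entry0=%#lx\n", mine.seq, mine.entries[0]);
        return 0;
}
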
189 * Remove and free any PTE-based mapping, and in remap_area_sections()
195 pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); in remap_area_sections()
197 pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); in remap_area_sections()
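
remap_area_sections() stores into both pmd[0] and pmd[1] because, with the classic (non-LPAE) 32-bit ARM page tables, one Linux pmd slot spans 2MB and holds a pair of 1MB hardware section descriptors; the page frame number advances by 1MB worth of pages between the two stores. A small userspace illustration of that pairing; the prot value and the macros are stand-ins, not kernel definitions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define SZ_1M        0x00100000u

/* Fill one 2MB "pmd" slot: two 1MB section descriptors, back to back. */
static void set_section_pair(uint32_t pmd[2], uint32_t pfn, uint32_t prot)
{
        pmd[0] = (pfn << PAGE_SHIFT) | prot;
        pfn += SZ_1M >> PAGE_SHIFT;           /* next megabyte of phys space */
        pmd[1] = (pfn << PAGE_SHIFT) | prot;
}

int main(void)
{
        uint32_t pmd[2];

        set_section_pair(pmd, 0x40000000u >> PAGE_SHIFT, 0x2 /* made-up prot */);
        /* prints 0x40000002 0x40100002 */
        printf("%#010" PRIx32 " %#010" PRIx32 "\n", pmd[0], pmd[1]);
        return 0;
}
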
216 * Remove and free any PTE-based mapping, and in remap_area_supersections()
223 super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect | in remap_area_supersections()
225 super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20; in remap_area_supersections()
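
The shift on the last matched line above is how a supersection descriptor carries a physical address wider than 32 bits: with 4K pages, pfn >> (32 - PAGE_SHIFT) yields physical address bits [35:32], and masking with 0xf and shifting left by 20 drops them into descriptor bits [23:20], where the 16MB supersection format keeps its extended address bits. A worked sketch of just that encoding, assuming PAGE_SHIFT is 12 and reducing the flags to the supersection bit:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define SECT_SUPER   (1u << 18)   /* descriptor bit marking a 16MB entry */

/* Build a bare supersection descriptor for a (possibly >4GB) physical
 * address; memory-type and access bits are deliberately left out. */
static uint32_t supersection_desc(uint64_t phys)
{
        uint64_t pfn  = phys >> PAGE_SHIFT;
        uint32_t desc = (uint32_t)(pfn << PAGE_SHIFT);      /* low 32 address bits  */

        desc |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;   /* PA[35:32] -> [23:20] */
        desc |= SECT_SUPER;
        return desc;
}

int main(void)
{
        /* 0x240000000: PA[35:32] = 0x2, so 0x2 ends up in bits [23:20] */
        printf("%#010" PRIx32 "\n", supersection_desc(0x240000000ULL)); /* 0x40240000 */
        return 0;
}
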
277 addr = (unsigned long)svm->vm.addr; in __arm_ioremap_pfn_caller()
278 addr += paddr - svm->vm.phys_addr; in __arm_ioremap_pfn_caller()
284 * Don't allow RAM to be mapped with mismatched attributes - this in __arm_ioremap_pfn_caller()
293 addr = (unsigned long)area->addr; in __arm_ioremap_pfn_caller()
294 area->phys_addr = paddr; in __arm_ioremap_pfn_caller()
301 area->flags |= VM_ARM_SECTION_MAPPING; in __arm_ioremap_pfn_caller()
304 area->flags |= VM_ARM_SECTION_MAPPING; in __arm_ioremap_pfn_caller()
309 __pgprot(type->prot_pte)); in __arm_ioremap_pfn_caller()
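
When __arm_ioremap_pfn_caller() finds a suitable static mapping it creates nothing new: the cookie handed back is the static mapping's virtual base plus the offset of the requested physical address within the mapped range. The same arithmetic in isolation, with invented addresses:

#include <stdio.h>

int main(void)
{
        unsigned long vm_addr = 0xf8000000UL;  /* virtual base of static map */
        unsigned long vm_phys = 0x40000000UL;  /* physical base it covers    */
        unsigned long paddr   = 0x40012000UL;  /* physical address requested */

        unsigned long cookie  = vm_addr + (paddr - vm_phys);

        printf("%#lx\n", cookie);              /* prints 0xf8012000 */
        return 0;
}
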
330 last_addr = phys_addr + size - 1; in __arm_ioremap_caller()
343 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
344 * have to convert them into an offset in a page-aligned mapping, but the
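
The note above describes the split done in the ioremap path: a request that is not page aligned is broken into a page-aligned base plus an in-page offset, the mapping itself is rounded up to whole pages, and the offset is added back to the pointer returned to the caller. A standalone sketch of that arithmetic, assuming 4K pages:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define PAGE_MASK   (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long phys_addr = 0x40000f80UL;   /* not page aligned */
        unsigned long size      = 0x100;

        unsigned long offset    = phys_addr & ~PAGE_MASK;   /* 0xf80        */
        unsigned long base      = phys_addr & PAGE_MASK;    /* 0x40000000   */
        unsigned long map_size  = (offset + size + PAGE_SIZE - 1) & PAGE_MASK;

        /* the request crosses a page boundary, so two pages are mapped */
        printf("base=%#lx map_size=%#lx offset=%#lx\n", base, map_size, offset);
        return 0;
}
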
421 struct vm_struct *vm; in __iounmap() local
423 vm = find_vm_area(addr); in __iounmap()
427 * specially as the VM subsystem does not know how to handle in __iounmap()
430 if (vm && (vm->flags & VM_ARM_SECTION_MAPPING)) in __iounmap()
431 unmap_area_sections((unsigned long)vm->addr, vm->size); in __iounmap()
456 BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT); in pci_ioremap_io()
461 __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte)); in pci_ioremap_io()
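
pci_ioremap_io() places a fixed 64K PCI I/O window at the given offset inside the kernel's PCI I/O space, so the BUG_ON above only checks that the window's last byte still lies below IO_SPACE_LIMIT. A trimmed-down model of that bounds check; the limit used here is an example value, not ARM's actual constant:

#include <assert.h>
#include <stdio.h>

#define SZ_64K          0x10000UL
#define IO_SPACE_LIMIT  0xfffffUL   /* example: 1MB of I/O space */

int main(void)
{
        unsigned long offset = 3 * SZ_64K;   /* hypothetical fourth 64K window */

        assert(offset + SZ_64K - 1 <= IO_SPACE_LIMIT);
        printf("window %#lx to %#lx\n", offset, offset + SZ_64K - 1);
        return 0;
}
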