Lines Matching +full:vm +full:- +full:map

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1995-2005 Russell King
34 #include <asm/mach/map.h>
46 * zero-initialized data and COW.
52 * The pmd table for the upper-most set of pages.
143 int i, selected = -1; in early_cachepolicy()
154 if (selected == -1) in early_cachepolicy()
238 [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
434 pr_warn("Forcing write-allocate cache policy for SMP\n"); in build_mem_type_table()
445 * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those in build_mem_type_table()
457 * "update-able on write" bit on ARM610). However, Xscale and in build_mem_type_table()
480 * Mark device regions on ARMv6+ as execute-never in build_mem_type_table()
494 * - shared device is SXCB=1100 in build_mem_type_table()
495 * - nonshared device is SXCB=0100 in build_mem_type_table()
496 * - write combine device mem is SXCB=0001 in build_mem_type_table()
505 * - shared device is TEXCB=00101 in build_mem_type_table()
506 * - nonshared device is TEXCB=01000 in build_mem_type_table()
507 * - write combine device mem is TEXCB=00100 in build_mem_type_table()
516 * - shared device is TEXCB=00001 in build_mem_type_table()
517 * - nonshared device is TEXCB=01000 in build_mem_type_table()
518 * - write combine device mem is TEXCB=00100 in build_mem_type_table()
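
An illustrative aside (not part of mmu.c): the SXCB/TEXCB strings quoted above are attribute bits in a short-descriptor section entry, where B is bit 2, C is bit 3 and TEX[2:0] sits at bits 14:12. A minimal userspace sketch assembling the ARMv6 and no-TEX-remap ARMv7 encodings listed above:

    #include <stdint.h>
    #include <stdio.h>

    /* Section-descriptor attribute bits (ARM short-descriptor format). */
    #define SECT_B       (1u << 2)
    #define SECT_C       (1u << 3)
    #define SECT_TEX(x)  ((uint32_t)(x) << 12)

    int main(void)
    {
        /* TEXCB=00101: ARMv6 shared device (TEX=001, C=0, B=1) */
        printf("v6 shared device:  0x%05x\n", SECT_TEX(1) | SECT_B);
        /* TEXCB=01000: nonshared device (TEX=010, C=0, B=0) */
        printf("nonshared device:  0x%05x\n", SECT_TEX(2));
        /* TEXCB=00100: write-combining memory (TEX=001, C=0, B=0) */
        printf("write combine:     0x%05x\n", SECT_TEX(1));
        /* TEXCB=00001: ARMv7 (no TEX remap) shared device */
        printf("v7 shared device:  0x%05x\n", SECT_B);
        return 0;
    }
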
533 * Now deal with the memory-type mappings in build_mem_type_table()
536 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; in build_mem_type_table()
542 * r/o, kernel r/w to map the vectors page. in build_mem_type_table()
549 * in the Short-descriptor translation table format descriptors. in build_mem_type_table()
595 * Non-cacheable Normal - intended for memory areas that must in build_mem_type_table()
600 /* Non-cacheable Normal is XCB = 001 */ in build_mem_type_table()
604 /* For both ARMv6 and non-TEX-remapping ARMv7 */ in build_mem_type_table()
644 mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; in build_mem_type_table()
646 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; in build_mem_type_table()
650 mem_types[MT_ROM].prot_sect |= cp->pmd; in build_mem_type_table()
652 switch (cp->pmd) { in build_mem_type_table()
662 ecc_mask ? "ECC enabled, " : "", cp->policy); in build_mem_type_table()
666 if (t->prot_l1) in build_mem_type_table()
667 t->prot_l1 |= PMD_DOMAIN(t->domain); in build_mem_type_table()
668 if (t->prot_sect) in build_mem_type_table()
669 t->prot_sect |= PMD_DOMAIN(t->domain); in build_mem_type_table()
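
For orientation, a sketch of what the loop above does (bit position taken from the 2-level short-descriptor format, where the domain number occupies bits 8:5 of a level-1 descriptor; the struct and values here are simplified stand-ins, not the kernel definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the effect of PMD_DOMAIN(): domain number into bits 8:5. */
    #define PMD_DOMAIN(x)  ((uint32_t)(x) << 5)

    struct mem_type { uint32_t prot_l1, prot_sect; unsigned domain; };

    int main(void)
    {
        struct mem_type t = { .prot_l1 = 0x01, .prot_sect = 0x02, .domain = 1 };

        if (t.prot_l1)                           /* only patch types that  */
            t.prot_l1 |= PMD_DOMAIN(t.domain);   /* have that descriptor   */
        if (t.prot_sect)
            t.prot_sect |= PMD_DOMAIN(t.domain);

        printf("prot_l1=0x%02x prot_sect=0x%02x\n", t.prot_l1, t.prot_sect);
        return 0;
    }
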
679 else if (file->f_flags & O_SYNC) in phys_mem_access_prot()
732 pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc); in alloc_init_pte()
734 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), in alloc_init_pte()
754 * (See arch/arm/include/asm/pgtable-2level.h) in __map_init_section()
760 *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0)); in __map_init_section()
777 * With LPAE, we must loop over to map in alloc_init_pmd()
783 * Try a section mapping - addr, next and phys must all be in alloc_init_pmd()
786 if (type->prot_sect && in alloc_init_pmd()
794 phys += next - addr; in alloc_init_pmd()
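
The alignment rule stated above ("addr, next and phys must all be" section-aligned) collapses into one mask test by OR-ing the three values first. A runnable sketch of that predicate, assuming 1MiB sections:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTION_SIZE  0x00100000u            /* 1 MiB */
    #define SECTION_MASK  (~(SECTION_SIZE - 1))

    /* True only if all three operands are section-aligned. */
    static bool can_use_section(uint32_t addr, uint32_t next, uint32_t phys)
    {
        return ((addr | next | phys) & ~SECTION_MASK) == 0;
    }

    int main(void)
    {
        printf("%d\n", can_use_section(0xc0000000, 0xc0100000, 0x80000000)); /* 1 */
        printf("%d\n", can_use_section(0xc0000000, 0xc0100000, 0x80001000)); /* 0 */
        return 0;
    }
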
810 phys += next - addr; in alloc_init_pud()
825 phys += next - addr; in alloc_init_p4d()
839 addr = md->virtual; in create_36bit_mapping()
840 phys = __pfn_to_phys(md->pfn); in create_36bit_mapping()
841 length = PAGE_ALIGN(md->length); in create_36bit_mapping()
845 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
855 if (type->domain) { in create_36bit_mapping()
857 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
861 if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { in create_36bit_mapping()
863 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
871 phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20); in create_36bit_mapping()
882 *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER | in create_36bit_mapping()
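
A worked example (userspace-only, addresses invented) of the two supersection quirks visible above: bits 35:32 of the 36-bit physical address travel in descriptor bits 23:20, and the resulting descriptor must be replicated into all 16 level-1 entries that a 16MiB supersection spans:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint64_t pfn  = 0x140000;                        /* phys 0x1_4000_0000 (5 GiB) */
        uint32_t phys = (uint32_t)(pfn << PAGE_SHIFT);   /* low 32 bits: 0x40000000 */

        /* Fold physical address bits 35:32 into descriptor bits 23:20. */
        phys |= (uint32_t)(((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

        /* One supersection = 16 identical consecutive level-1 entries. */
        for (int i = 0; i < 16; i++)
            printf("pmd[%2d] = 0x%08x | SUPER | prot_sect\n", i, phys);
        return 0;
    }
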
901 type = &mem_types[md->type]; in __create_mapping()
905 * Catch 36-bit addresses in __create_mapping()
907 if (md->pfn >= 0x100000) { in __create_mapping()
913 addr = md->virtual & PAGE_MASK; in __create_mapping()
914 phys = __pfn_to_phys(md->pfn); in __create_mapping()
915 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); in __create_mapping()
917 if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { in __create_mapping()
918 pr_warn("BUG: map for 0x%08llx at 0x%08lx cannot be mapped using pages, ignoring.\n", in __create_mapping()
919 (long long)__pfn_to_phys(md->pfn), addr); in __create_mapping()
930 phys += next - addr; in __create_mapping()
944 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { in create_mapping()
946 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
950 if (md->type == MT_DEVICE && in create_mapping()
951 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START && in create_mapping()
952 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { in create_mapping()
954 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
967 p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual); in create_mapping_late()
970 pud = pud_alloc(mm, p4d, md->virtual); in create_mapping_late()
984 struct vm_struct *vm; in iotable_init() local
995 for (md = io_desc; nr; md++, nr--) { in iotable_init()
998 vm = &svm->vm; in iotable_init()
999 vm->addr = (void *)(md->virtual & PAGE_MASK); in iotable_init()
1000 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); in iotable_init()
1001 vm->phys_addr = __pfn_to_phys(md->pfn); in iotable_init()
1002 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; in iotable_init()
1003 vm->flags |= VM_ARM_MTYPE(md->type); in iotable_init()
1004 vm->caller = iotable_init; in iotable_init()
1012 struct vm_struct *vm; in vm_reserve_area_early() local
1020 vm = &svm->vm; in vm_reserve_area_early()
1021 vm->addr = (void *)addr; in vm_reserve_area_early()
1022 vm->size = size; in vm_reserve_area_early()
1023 vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; in vm_reserve_area_early()
1024 vm->caller = caller; in vm_reserve_area_early()
1032 * (see definition in include/asm/pgtable-2level.h). However a call to
1039 * Let's avoid the issue by inserting dummy vm entries covering the unused
1051 struct vm_struct *vm; in fill_pmd_gaps() local
1056 vm = &svm->vm; in fill_pmd_gaps()
1057 addr = (unsigned long)vm->addr; in fill_pmd_gaps()
1062 * Check if this vm starts on an odd section boundary. in fill_pmd_gaps()
1073 * Then check if this vm ends on an odd section boundary. in fill_pmd_gaps()
1077 addr += vm->size; in fill_pmd_gaps()
1084 /* no need to look at any vm entry until we hit the next PMD */ in fill_pmd_gaps()
1085 next = (addr + PMD_SIZE - 1) & PMD_MASK; in fill_pmd_gaps()
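
The "odd section boundary" tests above exist because one 2-level pmd covers two 1MiB sections, so a static mapping that starts or ends mid-pmd leaves a half-pmd that a later ioremap() must not be allowed to claim. A sketch of the boundary test, assuming SECTION_SIZE = 1MiB and PMD_SIZE = 2MiB:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTION_SIZE 0x00100000u   /* 1 MiB */
    #define PMD_SIZE     0x00200000u   /* 2 MiB: one pmd, two sections */
    #define PMD_MASK     (~(PMD_SIZE - 1))

    int main(void)
    {
        uint32_t addr = 0xfeb00000;    /* ends on an odd 1 MiB boundary */

        /* The low section of the pmd is in use; reserve the other half. */
        if (addr & SECTION_SIZE)
            printf("reserve half-pmd gap at 0x%08x\n", addr & PMD_MASK);
        return 0;
    }
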
1111 struct map_desc map; in debug_ll_io_init() local
1113 debug_ll_addr(&map.pfn, &map.virtual); in debug_ll_io_init()
1114 if (!map.pfn || !map.virtual) in debug_ll_io_init()
1116 map.pfn = __phys_to_pfn(map.pfn); in debug_ll_io_init()
1117 map.virtual &= PAGE_MASK; in debug_ll_io_init()
1118 map.length = PAGE_SIZE; in debug_ll_io_init()
1119 map.type = MT_DEVICE; in debug_ll_io_init()
1120 iotable_init(&map, 1); in debug_ll_io_init()
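
debug_ll_io_init() above already doubles as a minimal iotable_init() usage example; the classic board-file variant passes a static __initdata array from the machine's map_io hook. A sketch with an invented device and addresses (not taken from any real machine file):

    #include <linux/sizes.h>
    #include <asm/mach/map.h>

    /* Hypothetical SoC: one 1 MiB peripheral window at a fixed virtual address. */
    static struct map_desc board_io_desc[] __initdata = {
        {
            .virtual = 0xf8000000,
            .pfn     = __phys_to_pfn(0x10000000),
            .length  = SZ_1M,
            .type    = MT_DEVICE,
        },
    };

    static void __init board_map_io(void)
    {
        iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
    }
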
1129 * area - the default is 240MiB.
1142 vmalloc_max = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET); in early_vmalloc()
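
A worked check of that upper bound, assuming the common 3G/1G split (PAGE_OFFSET = 0xc0000000, VMALLOC_END = 0xff800000, VMALLOC_OFFSET = 8MiB; all three are configuration-dependent):

    #include <stdio.h>

    int main(void)
    {
        unsigned long vmalloc_end    = 0xff800000UL;
        unsigned long page_offset    = 0xc0000000UL;
        unsigned long sz_32m         = 0x02000000UL;  /* minimum lowmem kept */
        unsigned long vmalloc_offset = 0x00800000UL;  /* 8 MiB guard hole */

        unsigned long vmalloc_max =
            vmalloc_end - (page_offset + sz_32m + vmalloc_offset);

        printf("vmalloc_max = %lu MiB\n", vmalloc_max >> 20);  /* 976 MiB */
        return 0;
    }
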
1164 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4. in adjust_lowmem_bounds()
1169 vmalloc_limit = (u64)VMALLOC_END - vmalloc_size - VMALLOC_OFFSET - in adjust_lowmem_bounds()
1180 len = round_up(block_start, PMD_SIZE) - block_start; in adjust_lowmem_bounds()
1200 * Find the first non-pmd-aligned page, and point in adjust_lowmem_bounds()
1202 * limit down to be pmd-aligned, which happens at the in adjust_lowmem_bounds()
1206 * bank can be non-pmd-aligned. The only exception is in adjust_lowmem_bounds()
1207 * that the start of the bank 0 must be section- in adjust_lowmem_bounds()
1224 high_memory = __va(arm_lowmem_limit - 1) + 1; in adjust_lowmem_bounds()
1240 pr_notice("Ignoring RAM at %pa-%pa\n", in adjust_lowmem_bounds()
1244 memblock_remove(memblock_limit, end - memblock_limit); in adjust_lowmem_bounds()
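
The rounding seen above is plain power-of-two arithmetic: round_up(block_start, PMD_SIZE) - block_start measures the slice from a block's start to the next pmd boundary. A small sketch, assuming 2MiB pmds:

    #include <stdint.h>
    #include <stdio.h>

    #define PMD_SIZE 0x00200000u   /* 2 MiB */

    /* Power-of-two round-up, as the kernel's round_up() computes it. */
    static uint32_t round_up_p2(uint32_t x, uint32_t align)
    {
        return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        uint32_t block_start = 0x80100000;   /* deliberately not pmd-aligned */
        uint32_t len = round_up_p2(block_start, PMD_SIZE) - block_start;

        printf("0x%08x bytes to the next pmd boundary\n", len);  /* 0x00100000 */
        return 0;
    }
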
1269 * are using a thumb-compiled kernel, there will be 8MB more in prepare_page_table()
1280 /* The XIP kernel is mapped in the module area -- skip over it */ in prepare_page_table()
1281 addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK; in prepare_page_table()
1324 * precious DMA-able memory... in arm_mm_memblock_reserve()
1326 memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); in arm_mm_memblock_reserve()
1339 struct map_desc map; in devicemaps_init() local
1357 /* create a read-only mapping of the device tree */ in devicemaps_init()
1358 map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK); in devicemaps_init()
1359 map.virtual = FDT_FIXED_BASE; in devicemaps_init()
1360 map.length = FDT_FIXED_SIZE; in devicemaps_init()
1361 map.type = MT_ROM; in devicemaps_init()
1362 create_mapping(&map); in devicemaps_init()
1366 * Map the kernel if it is XIP. in devicemaps_init()
1370 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); in devicemaps_init()
1371 map.virtual = MODULES_VADDR; in devicemaps_init()
1372 map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; in devicemaps_init()
1373 map.type = MT_ROM; in devicemaps_init()
1374 create_mapping(&map); in devicemaps_init()
1378 * Map the cache flushing regions. in devicemaps_init()
1381 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); in devicemaps_init()
1382 map.virtual = FLUSH_BASE; in devicemaps_init()
1383 map.length = SZ_1M; in devicemaps_init()
1384 map.type = MT_CACHECLEAN; in devicemaps_init()
1385 create_mapping(&map); in devicemaps_init()
1388 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); in devicemaps_init()
1389 map.virtual = FLUSH_BASE_MINICACHE; in devicemaps_init()
1390 map.length = SZ_1M; in devicemaps_init()
1391 map.type = MT_MINICLEAN; in devicemaps_init()
1392 create_mapping(&map); in devicemaps_init()
1396 * Create a mapping for the machine vectors at the high-vectors in devicemaps_init()
1397 * location (0xffff0000). If we aren't using high-vectors, also in devicemaps_init()
1398 * create a mapping at the low-vectors virtual address. in devicemaps_init()
1400 map.pfn = __phys_to_pfn(virt_to_phys(vectors)); in devicemaps_init()
1401 map.virtual = 0xffff0000; in devicemaps_init()
1402 map.length = PAGE_SIZE; in devicemaps_init()
1404 map.type = MT_HIGH_VECTORS; in devicemaps_init()
1406 map.type = MT_LOW_VECTORS; in devicemaps_init()
1408 create_mapping(&map); in devicemaps_init()
1411 map.virtual = 0; in devicemaps_init()
1412 map.length = PAGE_SIZE * 2; in devicemaps_init()
1413 map.type = MT_LOW_VECTORS; in devicemaps_init()
1414 create_mapping(&map); in devicemaps_init()
1417 /* Now create a kernel read-only mapping */ in devicemaps_init()
1418 map.pfn += 1; in devicemaps_init()
1419 map.virtual = 0xffff0000 + PAGE_SIZE; in devicemaps_init()
1420 map.length = PAGE_SIZE; in devicemaps_init()
1421 map.type = MT_LOW_VECTORS; in devicemaps_init()
1422 create_mapping(&map); in devicemaps_init()
1425 * Ask the machine support to map in the statically mapped devices. in devicemaps_init()
1427 if (mdesc->map_io) in devicemaps_init()
1428 mdesc->map_io(); in devicemaps_init()
1439 * any write-allocated cache lines in the vector page are written in devicemaps_init()
1465 /* Map all the lowmem memory banks. */ in map_lowmem()
1467 struct map_desc map; in map_lowmem() local
1469 pr_debug("map lowmem start: 0x%08llx, end: 0x%08llx\n", in map_lowmem()
1485 * the kernel memory from it and map each part separately. We in map_lowmem()
1488 * +--------+ +--------+ in map_lowmem()
1489 * +-- start --+ +--------+ | Kernel | | Kernel | in map_lowmem()
1491 * | | | case 1 | +--------+ | | +--------+ in map_lowmem()
1492 * | Memory | +--------+ | | | Kernel | in map_lowmem()
1493 * | range | +--------+ | | | case 6 | in map_lowmem()
1494 * | | | Kernel | +--------+ | | +--------+ in map_lowmem()
1496 * +-- end ----+ +--------+ | case 4 | | | in map_lowmem()
1497 * +--------+ +--------+ in map_lowmem()
1500 /* Case 5: kernel covers range, don't map anything, should be rare */ in map_lowmem()
1508 /* Map memory below the kernel */ in map_lowmem()
1509 map.pfn = __phys_to_pfn(start); in map_lowmem()
1510 map.virtual = __phys_to_virt(start); in map_lowmem()
1511 map.length = kernel_sec_start - start; in map_lowmem()
1512 map.type = MT_MEMORY_RW; in map_lowmem()
1513 create_mapping(&map); in map_lowmem()
1514 /* Map memory above the kernel */ in map_lowmem()
1515 map.pfn = __phys_to_pfn(kernel_sec_end); in map_lowmem()
1516 map.virtual = __phys_to_virt(kernel_sec_end); in map_lowmem()
1517 map.length = end - kernel_sec_end; in map_lowmem()
1518 map.type = MT_MEMORY_RW; in map_lowmem()
1519 create_mapping(&map); in map_lowmem()
1535 map.pfn = __phys_to_pfn(start); in map_lowmem()
1536 map.virtual = __phys_to_virt(start); in map_lowmem()
1537 map.length = end - start; in map_lowmem()
1538 map.type = MT_MEMORY_RW; in map_lowmem()
1539 create_mapping(&map); in map_lowmem()
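
The six cases in the diagram further up reduce to interval comparisons between [start, end) and [kernel_sec_start, kernel_sec_end). A compact classification sketch (the helper and its labels are illustrative, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    static const char *classify(uint64_t start, uint64_t end,
                                uint64_t ksec_start, uint64_t ksec_end)
    {
        if (ksec_start <= start && ksec_end >= end)
            return "kernel covers range: map nothing (case 5)";
        if (ksec_start > start && ksec_end < end)
            return "kernel inside range: map RW below and above it";
        if (ksec_start <= start && ksec_end > start)
            return "kernel overlaps start of range";
        if (ksec_start < end && ksec_end >= end)
            return "kernel overlaps end of range";
        return "no overlap: map the whole range MT_MEMORY_RW";
    }

    int main(void)
    {
        printf("%s\n", classify(0x80000000, 0x90000000,
                                0x80800000, 0x80c00000));
        return 0;
    }
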
1550 * +----------------+ kernel_x_start in map_kernel()
1553 * +----------------+ kernel_x_end / kernel_nx_start in map_kernel()
1554 * | Non-executable | in map_kernel()
1556 * +----------------+ kernel_nx_end in map_kernel()
1562 * non-executable part of the kernel memory is actually mapped as executable. in map_kernel()
1570 struct map_desc map; in map_kernel() local
1572 map.pfn = __phys_to_pfn(kernel_x_start); in map_kernel()
1573 map.virtual = __phys_to_virt(kernel_x_start); in map_kernel()
1574 map.length = kernel_x_end - kernel_x_start; in map_kernel()
1575 map.type = MT_MEMORY_RWX; in map_kernel()
1576 create_mapping(&map); in map_kernel()
1582 map.pfn = __phys_to_pfn(kernel_nx_start); in map_kernel()
1583 map.virtual = __phys_to_virt(kernel_nx_start); in map_kernel()
1584 map.length = kernel_nx_end - kernel_nx_start; in map_kernel()
1585 map.type = MT_MEMORY_RW; in map_kernel()
1586 create_mapping(&map); in map_kernel()
1604 if (!mdesc->pv_fixup) in early_paging_init()
1607 offset = mdesc->pv_fixup(); in early_paging_init()
1631 /* Re-set the phys pfn offset, and the pv offset */ in early_paging_init()
1637 (&__pv_table_end - &__pv_table_begin) << 2); in early_paging_init()
1656 * Fixup the page tables - this must be in the idmap region as in early_paging_init()
1663 /* Re-enable the caches and cacheable TLB walks */ in early_paging_init()
1674 if (!mdesc->pv_fixup) in early_paging_init()
1677 offset = mdesc->pv_fixup(); in early_paging_init()
1692 unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1); in early_fixmap_shutdown()
1700 struct map_desc map; in early_fixmap_shutdown() local
1702 map.virtual = fix_to_virt(i); in early_fixmap_shutdown()
1703 pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual); in early_fixmap_shutdown()
1710 map.pfn = pte_pfn(*pte); in early_fixmap_shutdown()
1711 map.type = MT_DEVICE; in early_fixmap_shutdown()
1712 map.length = PAGE_SIZE; in early_fixmap_shutdown()
1714 create_mapping(&map); in early_fixmap_shutdown()
1726 pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n", in paging_init()