Lines Matching +full:vm +full:- +full:map

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1995-2005 Russell King
34 #include <asm/mach/map.h>
44 * zero-initialized data and COW.
50 * The pmd table for the upper-most set of pages.
141 int i, selected = -1; in early_cachepolicy()
152 if (selected == -1) in early_cachepolicy()
236 [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
433 pr_warn("Forcing write-allocate cache policy for SMP\n"); in build_mem_type_table()
444 * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those in build_mem_type_table()
456 * "update-able on write" bit on ARM610). However, Xscale and in build_mem_type_table()
479 * Mark device regions on ARMv6+ as execute-never in build_mem_type_table()
493 * - shared device is SXCB=1100 in build_mem_type_table()
494 * - nonshared device is SXCB=0100 in build_mem_type_table()
495 * - write combine device mem is SXCB=0001 in build_mem_type_table()
504 * - shared device is TEXCB=00101 in build_mem_type_table()
505 * - nonshared device is TEXCB=01000 in build_mem_type_table()
506 * - write combine device mem is TEXCB=00100 in build_mem_type_table()
515 * - shared device is TEXCB=00001 in build_mem_type_table()
516 * - nonshared device is TEXCB=01000 in build_mem_type_table()
517 * - write combine device mem is TEXCB=00100 in build_mem_type_table()
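
The three comment blocks above enumerate how TEX[2:0], C and B pack into a 5-bit TEXCB value per device type. A minimal user-space sketch of that packing, assuming the conventional TEX-high/B-low bit order (the helper names are mine, not kernel code):

    #include <stdio.h>

    /* Pack TEX[2:0], C and B into the 5-bit TEXCB value quoted above. */
    static unsigned int texcb(unsigned int tex, unsigned int c, unsigned int b)
    {
        return (tex << 2) | (c << 1) | b;
    }

    static void show(const char *name, unsigned int v)
    {
        printf("%-20s TEXCB=%u%u%u%u%u\n", name,
               (v >> 4) & 1, (v >> 3) & 1, (v >> 2) & 1, (v >> 1) & 1, v & 1);
    }

    int main(void)
    {
        /* The ARMv6/ARMv7-without-TEX-remap encodings from the comment: */
        show("shared device", texcb(0, 0, 1));      /* 00001 */
        show("nonshared device", texcb(2, 0, 0));   /* 01000 */
        show("write combine", texcb(1, 0, 0));      /* 00100 */
        return 0;
    }
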
532 * Now deal with the memory-type mappings in build_mem_type_table()
535 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; in build_mem_type_table()
541 * r/o, kernel r/w to map the vectors page. in build_mem_type_table()
548 * in the Short-descriptor translation table format descriptors. in build_mem_type_table()
594 * Non-cacheable Normal - intended for memory areas that must in build_mem_type_table()
599 /* Non-cacheable Normal is XCB = 001 */ in build_mem_type_table()
603 /* For both ARMv6 and non-TEX-remapping ARMv7 */ in build_mem_type_table()
643 mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; in build_mem_type_table()
645 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; in build_mem_type_table()
649 mem_types[MT_ROM].prot_sect |= cp->pmd; in build_mem_type_table()
651 switch (cp->pmd) { in build_mem_type_table()
661 ecc_mask ? "ECC enabled, " : "", cp->policy); in build_mem_type_table()
665 if (t->prot_l1) in build_mem_type_table()
666 t->prot_l1 |= PMD_DOMAIN(t->domain); in build_mem_type_table()
667 if (t->prot_sect) in build_mem_type_table()
668 t->prot_sect |= PMD_DOMAIN(t->domain); in build_mem_type_table()
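
The loop above folds each memory type's domain number into its L1 descriptors. On ARM the domain field occupies bits [8:5] of a section descriptor, which is all PMD_DOMAIN encodes; a standalone illustration (the prot_sect value is invented):

    #include <stdio.h>

    #define PMD_DOMAIN(x)    ((x) << 5)    /* domain field: L1 descriptor bits [8:5] */

    int main(void)
    {
        unsigned int prot_sect = 0x0402;   /* illustrative section descriptor bits */
        unsigned int domain = 1;           /* illustrative domain number */

        prot_sect |= PMD_DOMAIN(domain);
        printf("prot_sect with domain: 0x%04x\n", prot_sect);   /* 0x0422 */
        return 0;
    }
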
678 else if (file->f_flags & O_SYNC) in phys_mem_access_prot()
731 pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc); in alloc_init_pte()
733 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), in alloc_init_pte()
753 * (See arch/arm/include/asm/pgtable-2level.h) in __map_init_section()
759 *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0)); in __map_init_section()
776 * With LPAE, we must loop over to map in alloc_init_pmd()
782 * Try a section mapping - addr, next and phys must all be in alloc_init_pmd()
785 if (type->prot_sect && in alloc_init_pmd()
793 phys += next - addr; in alloc_init_pmd()
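
alloc_init_pmd() only takes the section path when the virtual start, the end of the span and the physical address are all 1 MiB aligned; anything else falls through to page mappings. That test, checked standalone with the usual 2-level constants:

    #include <stdio.h>
    #include <stdint.h>

    #define SECTION_SHIFT    20
    #define SECTION_SIZE     (1UL << SECTION_SHIFT)
    #define SECTION_MASK     (~(SECTION_SIZE - 1))

    /* A section descriptor is usable only if all three values share 1 MiB alignment. */
    static int can_use_section(uint32_t addr, uint32_t next, uint32_t phys)
    {
        return ((addr | next | phys) & ~SECTION_MASK) == 0;
    }

    int main(void)
    {
        printf("%d\n", can_use_section(0xc0000000, 0xc0100000, 0x80000000)); /* 1 */
        printf("%d\n", can_use_section(0xc0000000, 0xc0100000, 0x80001000)); /* 0 */
        return 0;
    }
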
809 phys += next - addr; in alloc_init_pud()
824 phys += next - addr; in alloc_init_p4d()
838 addr = md->virtual; in create_36bit_mapping()
839 phys = __pfn_to_phys(md->pfn); in create_36bit_mapping()
840 length = PAGE_ALIGN(md->length); in create_36bit_mapping()
844 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
854 if (type->domain) { in create_36bit_mapping()
856 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
860 if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { in create_36bit_mapping()
862 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
870 phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20); in create_36bit_mapping()
881 *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER | in create_36bit_mapping()
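
On a 36-bit supersection the descriptor's base field holds phys[31:24], while phys[35:32] lands in bits [23:20]; that is what the `(md->pfn >> (32 - PAGE_SHIFT)) & 0xF` term on line 870 computes. A worked example with an invented pfn above 4 GiB:

    #include <stdio.h>
    #include <inttypes.h>

    #define PAGE_SHIFT          12
    #define SUPERSECTION_SHIFT  24
    #define SUPERSECTION_MASK   (~((1UL << SUPERSECTION_SHIFT) - 1))

    int main(void)
    {
        uint64_t pfn = 0x480000;    /* phys 0x4_8000_0000, beyond 4 GiB */
        uint32_t phys = (uint32_t)(pfn << PAGE_SHIFT) & SUPERSECTION_MASK;

        /* Fold phys[35:32] into descriptor bits [23:20], as on line 870. */
        phys |= ((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20;

        printf("descriptor address bits: 0x%08" PRIx32 "\n", phys);  /* 0x80400000 */
        return 0;
    }
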
900 type = &mem_types[md->type]; in __create_mapping()
904 * Catch 36-bit addresses in __create_mapping()
906 if (md->pfn >= 0x100000) { in __create_mapping()
912 addr = md->virtual & PAGE_MASK; in __create_mapping()
913 phys = __pfn_to_phys(md->pfn); in __create_mapping()
914 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); in __create_mapping()
916 if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { in __create_mapping()
917 pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n", in __create_mapping()
918 (long long)__pfn_to_phys(md->pfn), addr); in __create_mapping()
929 phys += next - addr; in __create_mapping()
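
Before walking the tables, __create_mapping() rounds md->virtual down to a page boundary and pads the length by the offset it stripped, so the caller's full byte range stays mapped. That arithmetic, standalone:

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        unsigned long virtual = 0xc0000f80, length = 0x100;  /* unaligned request */

        unsigned long addr = virtual & PAGE_MASK;
        unsigned long len = PAGE_ALIGN(length + (virtual & ~PAGE_MASK));

        printf("addr=0x%lx len=0x%lx\n", addr, len);  /* 0xc0000000 0x2000 */
        return 0;
    }
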
943 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { in create_mapping()
945 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
949 if ((md->type == MT_DEVICE || md->type == MT_ROM) && in create_mapping()
950 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START && in create_mapping()
951 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { in create_mapping()
953 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
966 p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual); in create_mapping_late()
969 pud = pud_alloc(mm, p4d, md->virtual); in create_mapping_late()
983 struct vm_struct *vm; in iotable_init() local
994 for (md = io_desc; nr; md++, nr--) { in iotable_init()
997 vm = &svm->vm; in iotable_init()
998 vm->addr = (void *)(md->virtual & PAGE_MASK); in iotable_init()
999 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); in iotable_init()
1000 vm->phys_addr = __pfn_to_phys(md->pfn); in iotable_init()
1001 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; in iotable_init()
1002 vm->flags |= VM_ARM_MTYPE(md->type); in iotable_init()
1003 vm->caller = iotable_init; in iotable_init()
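
Each iotable entry is mirrored into a vm_struct so the vmalloc layer knows about (and ioremap() can later reuse) the static mapping. A user-space model of the bookkeeping above; the struct mirrors only the fields used in the excerpt, and the flag value is a stand-in, not the kernel's VM_IOREMAP:

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    struct vm_struct {          /* only the fields used above */
        void *addr;
        unsigned long size;
        unsigned long phys_addr;
        unsigned long flags;
    };

    int main(void)
    {
        /* hypothetical descriptor: 0x300 bytes at phys 0x10001000, virt 0xfed00100 */
        unsigned long virt = 0xfed00100, len = 0x300, phys = 0x10001000;
        struct vm_struct vm;

        vm.addr = (void *)(virt & PAGE_MASK);
        vm.size = PAGE_ALIGN(len + (virt & ~PAGE_MASK));
        vm.phys_addr = phys;
        vm.flags = 0x1;         /* stand-in for VM_IOREMAP */

        printf("addr=%p size=0x%lx phys=0x%lx\n", vm.addr, vm.size, vm.phys_addr);
        return 0;
    }
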
1011 struct vm_struct *vm; in vm_reserve_area_early() local
1019 vm = &svm->vm; in vm_reserve_area_early()
1020 vm->addr = (void *)addr; in vm_reserve_area_early()
1021 vm->size = size; in vm_reserve_area_early()
1022 vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; in vm_reserve_area_early()
1023 vm->caller = caller; in vm_reserve_area_early()
1031 * (see definition in include/asm/pgtable-2level.h). However a call to
1038 * Let's avoid the issue by inserting dummy vm entries covering the unused
1050 struct vm_struct *vm; in fill_pmd_gaps() local
1055 vm = &svm->vm; in fill_pmd_gaps()
1056 addr = (unsigned long)vm->addr; in fill_pmd_gaps()
1061 * Check if this vm starts on an odd section boundary. in fill_pmd_gaps()
1072 * Then check if this vm ends on an odd section boundary. in fill_pmd_gaps()
1076 addr += vm->size; in fill_pmd_gaps()
1083 /* no need to look at any vm entry until we hit the next PMD */ in fill_pmd_gaps()
1084 next = (addr + PMD_SIZE - 1) & PMD_MASK; in fill_pmd_gaps()
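
On 2-level ARM a Linux pmd spans two 1 MiB sections, so a static mapping that starts or ends on an odd 1 MiB boundary leaves the other half of its pmd unused; fill_pmd_gaps() plugs those halves with dummy entries. Its boundary test, as a standalone sketch:

    #include <stdio.h>

    #define SECTION_SIZE  (1UL << 20)
    #define PMD_SIZE      (2 * SECTION_SIZE)   /* a Linux pmd = two sections */
    #define PMD_MASK      (~(PMD_SIZE - 1))

    /* True when addr sits exactly half-way through a pmd. */
    static int on_odd_section(unsigned long addr)
    {
        return (addr & ~PMD_MASK) == SECTION_SIZE;
    }

    int main(void)
    {
        printf("%d\n", on_odd_section(0xfef00000));  /* 1: odd section boundary */
        printf("%d\n", on_odd_section(0xfee00000));  /* 0: pmd-aligned */
        return 0;
    }
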
1110 struct map_desc map; in debug_ll_io_init() local
1112 debug_ll_addr(&map.pfn, &map.virtual); in debug_ll_io_init()
1113 if (!map.pfn || !map.virtual) in debug_ll_io_init()
1115 map.pfn = __phys_to_pfn(map.pfn); in debug_ll_io_init()
1116 map.virtual &= PAGE_MASK; in debug_ll_io_init()
1117 map.length = PAGE_SIZE; in debug_ll_io_init()
1118 map.type = MT_DEVICE; in debug_ll_io_init()
1119 iotable_init(&map, 1); in debug_ll_io_init()
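
debug_ll_io_init() above is the minimal map_desc recipe: convert the physical address to a pfn, mask the virtual address to a page, set a one-page length and a device type, then register the entry. A user-space model of the same steps (the struct mirrors the kernel's map_desc fields; the UART address is invented):

    #include <stdio.h>

    #define PAGE_SHIFT        12
    #define PAGE_SIZE         (1UL << PAGE_SHIFT)
    #define PAGE_MASK         (~(PAGE_SIZE - 1))
    #define __phys_to_pfn(p)  ((p) >> PAGE_SHIFT)

    struct map_desc {         /* same fields as the kernel struct */
        unsigned long virtual;
        unsigned long pfn;
        unsigned long length;
        unsigned int type;
    };

    enum { MT_DEVICE };       /* stand-in for the kernel's memory types */

    int main(void)
    {
        struct map_desc map;

        map.pfn = __phys_to_pfn(0x101f1000UL);   /* hypothetical UART */
        map.virtual = 0xfef01234UL & PAGE_MASK;
        map.length = PAGE_SIZE;
        map.type = MT_DEVICE;

        printf("pfn=0x%lx virt=0x%lx len=0x%lx\n", map.pfn, map.virtual, map.length);
        return 0;
    }
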
1124 (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
1129 * area - the default is 240m.
1141 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { in early_vmalloc()
1142 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); in early_vmalloc()
1147 vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve); in early_vmalloc()
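
early_vmalloc() caps the requested reservation so lowmem never shrinks below 32 MiB, then recomputes vmalloc_min from it. The clamp, run standalone under an assumed 3G/1G split (both constants are illustrative):

    #include <stdio.h>

    #define SZ_32M       (32UL << 20)
    #define PAGE_OFFSET  0xc0000000UL   /* assumed 3G/1G split */
    #define VMALLOC_END  0xff800000UL   /* assumed */

    int main(void)
    {
        unsigned long vmalloc_reserve = 1200UL << 20;   /* over-large request */

        if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M))
            vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);

        /* clamped to 984 MiB, leaving exactly 32 MiB of lowmem */
        printf("vmalloc_min = 0x%lx (reserve %lu MiB)\n",
               VMALLOC_END - vmalloc_reserve, vmalloc_reserve >> 20);
        return 0;
    }
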
1162 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4. in adjust_lowmem_bounds()
1167 vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET; in adjust_lowmem_bounds()
1177 len = round_up(block_start, PMD_SIZE) - block_start; in adjust_lowmem_bounds()
1197 * Find the first non-pmd-aligned page, and point in adjust_lowmem_bounds()
1199 * limit down to be pmd-aligned, which happens at the in adjust_lowmem_bounds()
1203 * bank can be non-pmd-aligned. The only exception is in adjust_lowmem_bounds()
1204 * that the start of the bank 0 must be section- in adjust_lowmem_bounds()
1221 high_memory = __va(arm_lowmem_limit - 1) + 1; in adjust_lowmem_bounds()
1237 pr_notice("Ignoring RAM at %pa-%pa\n", in adjust_lowmem_bounds()
1241 memblock_remove(memblock_limit, end - memblock_limit); in adjust_lowmem_bounds()
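
adjust_lowmem_bounds() keeps arm_lowmem_limit section-mappable: an off-alignment bank start contributes the round_up() head seen above, and the final memblock limit is rounded down to a pmd boundary before anything beyond it is removed. Both roundings, checked standalone:

    #include <stdio.h>

    #define PMD_SIZE  (2UL << 20)   /* 2 MiB on 2-level ARM */
    #define round_up(x, a)    ((((x) + (a) - 1) / (a)) * (a))
    #define round_down(x, a)  (((x) / (a)) * (a))

    int main(void)
    {
        unsigned long block_start = 0x80300000;   /* not pmd-aligned */

        printf("head before alignment: 0x%lx\n",
               round_up(block_start, PMD_SIZE) - block_start);   /* 0x100000 */
        printf("rounded-down limit:    0x%lx\n",
               round_down(0x8fd34000UL, PMD_SIZE));              /* 0x8fc00000 */
        return 0;
    }
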
1260 /* The XIP kernel is mapped in the module area -- skip over it */ in prepare_page_table()
1261 addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK; in prepare_page_table()
1304 * precious DMA-able memory... in arm_mm_memblock_reserve()
1306 memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); in arm_mm_memblock_reserve()
1319 struct map_desc map; in devicemaps_init() local
1337 * Map the kernel if it is XIP. in devicemaps_init()
1341 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); in devicemaps_init()
1342 map.virtual = MODULES_VADDR; in devicemaps_init()
1343 map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; in devicemaps_init()
1344 map.type = MT_ROM; in devicemaps_init()
1345 create_mapping(&map); in devicemaps_init()
1349 * Map the cache flushing regions. in devicemaps_init()
1352 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); in devicemaps_init()
1353 map.virtual = FLUSH_BASE; in devicemaps_init()
1354 map.length = SZ_1M; in devicemaps_init()
1355 map.type = MT_CACHECLEAN; in devicemaps_init()
1356 create_mapping(&map); in devicemaps_init()
1359 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); in devicemaps_init()
1360 map.virtual = FLUSH_BASE_MINICACHE; in devicemaps_init()
1361 map.length = SZ_1M; in devicemaps_init()
1362 map.type = MT_MINICLEAN; in devicemaps_init()
1363 create_mapping(&map); in devicemaps_init()
1367 * Create a mapping for the machine vectors at the high-vectors in devicemaps_init()
1368 * location (0xffff0000). If we aren't using high-vectors, also in devicemaps_init()
1369 * create a mapping at the low-vectors virtual address. in devicemaps_init()
1371 map.pfn = __phys_to_pfn(virt_to_phys(vectors)); in devicemaps_init()
1372 map.virtual = 0xffff0000; in devicemaps_init()
1373 map.length = PAGE_SIZE; in devicemaps_init()
1375 map.type = MT_HIGH_VECTORS; in devicemaps_init()
1377 map.type = MT_LOW_VECTORS; in devicemaps_init()
1379 create_mapping(&map); in devicemaps_init()
1382 map.virtual = 0; in devicemaps_init()
1383 map.length = PAGE_SIZE * 2; in devicemaps_init()
1384 map.type = MT_LOW_VECTORS; in devicemaps_init()
1385 create_mapping(&map); in devicemaps_init()
1388 /* Now create a kernel read-only mapping */ in devicemaps_init()
1389 map.pfn += 1; in devicemaps_init()
1390 map.virtual = 0xffff0000 + PAGE_SIZE; in devicemaps_init()
1391 map.length = PAGE_SIZE; in devicemaps_init()
1392 map.type = MT_LOW_VECTORS; in devicemaps_init()
1393 create_mapping(&map); in devicemaps_init()
1396 * Ask the machine support to map in the statically mapped devices. in devicemaps_init()
1398 if (mdesc->map_io) in devicemaps_init()
1399 mdesc->map_io(); in devicemaps_init()
1410 * any write-allocated cache lines in the vector page are written in devicemaps_init()
1438 /* Map all the lowmem memory banks. */ in map_lowmem()
1440 struct map_desc map; in map_lowmem() local
1448 map.pfn = __phys_to_pfn(start); in map_lowmem()
1449 map.virtual = __phys_to_virt(start); in map_lowmem()
1450 map.length = end - start; in map_lowmem()
1451 map.type = MT_MEMORY_RWX; in map_lowmem()
1453 create_mapping(&map); in map_lowmem()
1455 map.pfn = __phys_to_pfn(start); in map_lowmem()
1456 map.virtual = __phys_to_virt(start); in map_lowmem()
1457 map.length = end - start; in map_lowmem()
1458 map.type = MT_MEMORY_RW; in map_lowmem()
1460 create_mapping(&map); in map_lowmem()
1464 map.pfn = __phys_to_pfn(start); in map_lowmem()
1465 map.virtual = __phys_to_virt(start); in map_lowmem()
1466 map.length = kernel_x_start - start; in map_lowmem()
1467 map.type = MT_MEMORY_RW; in map_lowmem()
1469 create_mapping(&map); in map_lowmem()
1472 map.pfn = __phys_to_pfn(kernel_x_start); in map_lowmem()
1473 map.virtual = __phys_to_virt(kernel_x_start); in map_lowmem()
1474 map.length = kernel_x_end - kernel_x_start; in map_lowmem()
1475 map.type = MT_MEMORY_RWX; in map_lowmem()
1477 create_mapping(&map); in map_lowmem()
1480 map.pfn = __phys_to_pfn(kernel_x_end); in map_lowmem()
1481 map.virtual = __phys_to_virt(kernel_x_end); in map_lowmem()
1482 map.length = end - kernel_x_end; in map_lowmem()
1483 map.type = MT_MEMORY_RW; in map_lowmem()
1485 create_mapping(&map); in map_lowmem()
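
map_lowmem() maps each memblock region in up to three pieces: RW below the kernel text, RWX across [kernel_x_start, kernel_x_end), and RW above it; regions not touching the text get a single mapping. A compact standalone model of that case split (addresses invented; the real function also handles the rodata-disabled path differently):

    #include <stdio.h>

    static void map(const char *type, unsigned long s, unsigned long e)
    {
        printf("%-14s 0x%08lx-0x%08lx\n", type, s, e);
    }

    /* Split one region around the executable kernel image, as map_lowmem() does. */
    static void map_region(unsigned long start, unsigned long end,
                           unsigned long kx_start, unsigned long kx_end)
    {
        if (end <= kx_start || start >= kx_end) {
            map("MT_MEMORY_RW", start, end);        /* no overlap */
        } else if (start >= kx_start && end <= kx_end) {
            map("MT_MEMORY_RWX", start, end);       /* inside the text */
        } else {
            if (start < kx_start)
                map("MT_MEMORY_RW", start, kx_start);
            map("MT_MEMORY_RWX",
                start > kx_start ? start : kx_start,
                end < kx_end ? end : kx_end);
            if (end > kx_end)
                map("MT_MEMORY_RW", kx_end, end);
        }
    }

    int main(void)
    {
        /* one RAM bank spanning the kernel text */
        map_region(0x80000000, 0xa0000000, 0x80100000, 0x80800000);
        return 0;
    }
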
1508 if (!mdesc->pv_fixup) in early_paging_init()
1511 offset = mdesc->pv_fixup(); in early_paging_init()
1529 /* Re-set the phys pfn offset, and the pv offset */ in early_paging_init()
1535 (&__pv_table_end - &__pv_table_begin) << 2); in early_paging_init()
1554 * Fixup the page tables - this must be in the idmap region as in early_paging_init()
1561 /* Re-enable the caches and cacheable TLB walks */ in early_paging_init()
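
The flush on line 1535 sizes the pv table by pointer difference: __pv_table_begin and __pv_table_end bound an array of 4-byte patch entries, so the element count is shifted left by 2 to become a byte count. A standalone illustration with an invented array:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t pv_table[48];   /* stand-in for __pv_table_begin..__pv_table_end */
        uint32_t *begin = pv_table, *end = pv_table + 48;

        /* pointer difference counts elements; << 2 converts to bytes */
        printf("%ld bytes\n", (long)((end - begin) << 2));   /* 192 bytes */
        return 0;
    }
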
1572 if (!mdesc->pv_fixup) in early_paging_init()
1575 offset = mdesc->pv_fixup(); in early_paging_init()
1590 unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1); in early_fixmap_shutdown()
1598 struct map_desc map; in early_fixmap_shutdown() local
1600 map.virtual = fix_to_virt(i); in early_fixmap_shutdown()
1601 pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual); in early_fixmap_shutdown()
1608 map.pfn = pte_pfn(*pte); in early_fixmap_shutdown()
1609 map.type = MT_DEVICE; in early_fixmap_shutdown()
1610 map.length = PAGE_SIZE; in early_fixmap_shutdown()
1612 create_mapping(&map); in early_fixmap_shutdown()