Lines Matching +full:non +full:- +full:armv7 (arch/arm/mm/mmu.c)
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1995-2005 Russell King
44 * zero-initialized data and COW.
50 * The pmd table for the upper-most set of pages.
158 int i, selected = -1; in early_cachepolicy()
169 if (selected == -1) in early_cachepolicy()
256 [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
458 pr_warn("Forcing write-allocate cache policy for SMP\n"); in build_mem_type_table()
469 * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those in build_mem_type_table()
481 * "update-able on write" bit on ARM610). However, Xscale and in build_mem_type_table()
504 * Mark device regions on ARMv6+ as execute-never in build_mem_type_table()
517 * For ARMv7 with TEX remapping, in build_mem_type_table()
518 * - shared device is SXCB=1100 in build_mem_type_table()
519 * - nonshared device is SXCB=0100 in build_mem_type_table()
520 * - write combine device mem is SXCB=0001 in build_mem_type_table()
529 * - shared device is TEXCB=00101 in build_mem_type_table()
530 * - nonshared device is TEXCB=01000 in build_mem_type_table()
531 * - write combine device mem is TEXCB=00100 in build_mem_type_table()
539 * For ARMv6 and ARMv7 without TEX remapping, in build_mem_type_table()
540 * - shared device is TEXCB=00001 in build_mem_type_table()
541 * - nonshared device is TEXCB=01000 in build_mem_type_table()
542 * - write combine device mem is TEXCB=00100 in build_mem_type_table()
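
The comment blocks above list the S/TEX/C/B encodings build_mem_type_table() selects for device memory on the different architecture variants. As a rough standalone illustration (bit positions assumed from the ARM short-descriptor section format, not code taken from this file), the sketch below packs the ARMv6/ARMv7 non-TEX-remapping TEXCB values into descriptor bits; the results correspond to the PMD_SECT_* masks the function ORs into prot_sect.

/*
 * Hedged sketch, not kernel code: pack the non-TEX-remapping TEXCB values
 * quoted above into short-descriptor section-entry bit positions
 * (B = bit 2, C = bit 3, TEX[2:0] = bits 14:12).
 */
#include <stdio.h>

static unsigned int texcb_to_section_bits(unsigned tex, unsigned c, unsigned b)
{
        return (b << 2) | (c << 3) | (tex << 12);
}

int main(void)
{
        printf("shared device     TEXCB=00001 -> 0x%05x\n", texcb_to_section_bits(0, 0, 1));
        printf("nonshared device  TEXCB=01000 -> 0x%05x\n", texcb_to_section_bits(2, 0, 0));
        printf("write combine     TEXCB=00100 -> 0x%05x\n", texcb_to_section_bits(1, 0, 0));
        return 0;
}
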
557 * Now deal with the memory-type mappings in build_mem_type_table()
560 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; in build_mem_type_table()
561 s2_pgprot = cp->pte_s2; in build_mem_type_table()
576 * in the Short-descriptor translation table format descriptors. in build_mem_type_table()
623 * Non-cacheable Normal - intended for memory areas that must in build_mem_type_table()
628 /* Non-cacheable Normal is XCB = 001 */ in build_mem_type_table()
632 /* For both ARMv6 and non-TEX-remapping ARMv7 */ in build_mem_type_table()
675 mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; in build_mem_type_table()
677 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; in build_mem_type_table()
681 mem_types[MT_ROM].prot_sect |= cp->pmd; in build_mem_type_table()
683 switch (cp->pmd) { in build_mem_type_table()
693 ecc_mask ? "ECC enabled, " : "", cp->policy); in build_mem_type_table()
697 if (t->prot_l1) in build_mem_type_table()
698 t->prot_l1 |= PMD_DOMAIN(t->domain); in build_mem_type_table()
699 if (t->prot_sect) in build_mem_type_table()
700 t->prot_sect |= PMD_DOMAIN(t->domain); in build_mem_type_table()
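
The loop at lines 697-700 folds each memory type's domain number into its level-1 descriptor bits. A minimal standalone sketch of that encoding (the bit position is from the short-descriptor format; the domain value is illustrative):

/*
 * Hedged sketch, not kernel code: the domain field of a level-1 descriptor
 * sits at bits 8:5, which is what PMD_DOMAIN() encodes.
 */
#include <stdio.h>

#define PMD_DOMAIN_SKETCH(dom)  ((unsigned long)(dom) << 5)    /* bits 8:5 */

int main(void)
{
        unsigned long prot_sect = 0;
        unsigned int domain = 1;            /* illustrative domain number */

        prot_sect |= PMD_DOMAIN_SKETCH(domain);
        printf("domain %u contributes 0x%03lx\n", domain, prot_sect);   /* 0x020 */
        return 0;
}
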
710 else if (file->f_flags & O_SYNC) in phys_mem_access_prot()
763 pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc); in alloc_init_pte()
765 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), in alloc_init_pte()
785 * (See arch/arm/include/asm/pgtable-2level.h) in __map_init_section()
791 *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0)); in __map_init_section()
814 * Try a section mapping - addr, next and phys must all be in alloc_init_pmd()
817 if (type->prot_sect && in alloc_init_pmd()
825 phys += next - addr; in alloc_init_pmd()
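
alloc_init_pmd() only uses __map_init_section() when addr, next and phys share 1 MiB alignment, otherwise it falls back to per-page mappings via alloc_init_pte(). A rough standalone illustration of that combined alignment test (constants assumed, not taken from the kernel headers):

/*
 * Hedged sketch, not kernel code: OR-ing addr, next and phys together lets
 * one mask comparison check the 1 MiB alignment of all three at once.
 */
#include <stdio.h>

#define SECTION_SHIFT   20
#define SECTION_SIZE    (1UL << SECTION_SHIFT)
#define SECTION_MASK    (~(SECTION_SIZE - 1))

static int can_use_section(unsigned long addr, unsigned long next, unsigned long phys)
{
        return ((addr | next | phys) & ~SECTION_MASK) == 0;
}

int main(void)
{
        printf("%d\n", can_use_section(0xc0000000UL, 0xc0100000UL, 0x80000000UL)); /* 1 */
        printf("%d\n", can_use_section(0xc0000000UL, 0xc0100000UL, 0x80001000UL)); /* 0 */
        return 0;
}
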
841 phys += next - addr; in alloc_init_pud()
855 addr = md->virtual; in create_36bit_mapping()
856 phys = __pfn_to_phys(md->pfn); in create_36bit_mapping()
857 length = PAGE_ALIGN(md->length); in create_36bit_mapping()
861 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
871 if (type->domain) { in create_36bit_mapping()
873 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
877 if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { in create_36bit_mapping()
879 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
887 phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20); in create_36bit_mapping()
897 *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER | in create_36bit_mapping()
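
create_36bit_mapping() builds 16 MiB supersections, where physical address bits [35:32] are carried in descriptor bits [23:20]; the shift on line 887 extracts those bits from the pfn. A standalone worked example of that arithmetic (PAGE_SHIFT = 12 assumed):

/*
 * Hedged sketch, not kernel code: with PAGE_SHIFT = 12, pfn >> 20 yields
 * physical address bits [35:32], which are folded into bits [23:20] of the
 * 32-bit descriptor address field.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12

int main(void)
{
        uint64_t pfn  = 0x180000;                         /* phys 0x1_8000_0000 */
        uint32_t phys = (uint32_t)(pfn << PAGE_SHIFT);    /* low 32 bits: 0x80000000 */

        phys |= ((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20; /* bits 35:32 -> 23:20 */
        printf("descriptor phys field: 0x%08x\n", phys);  /* 0x80100000 */
        return 0;
}
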
916 type = &mem_types[md->type]; in __create_mapping()
920 * Catch 36-bit addresses in __create_mapping()
922 if (md->pfn >= 0x100000) { in __create_mapping()
928 addr = md->virtual & PAGE_MASK; in __create_mapping()
929 phys = __pfn_to_phys(md->pfn); in __create_mapping()
930 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); in __create_mapping()
932 if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { in __create_mapping()
934 (long long)__pfn_to_phys(md->pfn), addr); in __create_mapping()
945 phys += next - addr; in __create_mapping()
959 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { in create_mapping()
961 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
965 if ((md->type == MT_DEVICE || md->type == MT_ROM) && in create_mapping()
966 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START && in create_mapping()
967 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { in create_mapping()
969 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
979 pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual); in create_mapping_late()
1004 for (md = io_desc; nr; md++, nr--) { in iotable_init()
1007 vm = &svm->vm; in iotable_init()
1008 vm->addr = (void *)(md->virtual & PAGE_MASK); in iotable_init()
1009 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); in iotable_init()
1010 vm->phys_addr = __pfn_to_phys(md->pfn); in iotable_init()
1011 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; in iotable_init()
1012 vm->flags |= VM_ARM_MTYPE(md->type); in iotable_init()
1013 vm->caller = iotable_init; in iotable_init()
1029 vm = &svm->vm; in vm_reserve_area_early()
1030 vm->addr = (void *)addr; in vm_reserve_area_early()
1031 vm->size = size; in vm_reserve_area_early()
1032 vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; in vm_reserve_area_early()
1033 vm->caller = caller; in vm_reserve_area_early()
1041 * (see definition in include/asm/pgtable-2level.h). However a call to
1065 vm = &svm->vm; in fill_pmd_gaps()
1066 addr = (unsigned long)vm->addr; in fill_pmd_gaps()
1086 addr += vm->size; in fill_pmd_gaps()
1094 next = (addr + PMD_SIZE - 1) & PMD_MASK; in fill_pmd_gaps()
1134 (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
1139 * area - the default is 240m.
1151 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { in early_vmalloc()
1152 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); in early_vmalloc()
1157 vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve); in early_vmalloc()
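
early_vmalloc() parses the vmalloc= option and clamps an oversized request so that at least 32 MiB of lowmem remains above PAGE_OFFSET; the default reserve is 240 MiB below VMALLOC_END. A standalone sketch of the arithmetic with illustrative constants (VMALLOC_END, VMALLOC_OFFSET and PAGE_OFFSET depend on the kernel configuration; the values below are assumptions):

/*
 * Hedged sketch, not kernel code: default vmalloc_min and the clamp applied
 * to an oversized "vmalloc=" request, using assumed constants.
 */
#include <stdio.h>

#define SZ_32M          0x02000000UL
#define VMALLOC_END     0xff800000UL    /* illustrative */
#define VMALLOC_OFFSET  0x00800000UL    /* illustrative: 8 MiB guard */
#define PAGE_OFFSET     0xc0000000UL    /* illustrative: 3G/1G split */

int main(void)
{
        unsigned long vmalloc_min = VMALLOC_END - (240UL << 20) - VMALLOC_OFFSET;
        unsigned long reserve = 1024UL << 20;   /* an oversized vmalloc= request */

        if (reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M))
                reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);

        printf("default vmalloc_min: 0x%08lx\n", vmalloc_min);  /* 0xf0000000 */
        printf("clamped reserve:     %lu MiB\n", reserve >> 20); /* 984 MiB */
        return 0;
}
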
1173 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4. in adjust_lowmem_bounds()
1178 vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET; in adjust_lowmem_bounds()
1186 if (!IS_ALIGNED(reg->base, PMD_SIZE)) { in adjust_lowmem_bounds()
1189 len = round_up(reg->base, PMD_SIZE) - reg->base; in adjust_lowmem_bounds()
1190 memblock_mark_nomap(reg->base, len); in adjust_lowmem_bounds()
1197 phys_addr_t block_start = reg->base; in adjust_lowmem_bounds()
1198 phys_addr_t block_end = reg->base + reg->size; in adjust_lowmem_bounds()
1203 if (reg->base < vmalloc_limit) { in adjust_lowmem_bounds()
1216 * Find the first non-pmd-aligned page, and point in adjust_lowmem_bounds()
1218 * limit down to be pmd-aligned, which happens at the in adjust_lowmem_bounds()
1222 * bank can be non-pmd-aligned. The only exception is in adjust_lowmem_bounds()
1223 * that the start of the bank 0 must be section- in adjust_lowmem_bounds()
1240 high_memory = __va(arm_lowmem_limit - 1) + 1; in adjust_lowmem_bounds()
1256 pr_notice("Ignoring RAM at %pa-%pa\n", in adjust_lowmem_bounds()
1260 memblock_remove(memblock_limit, end - memblock_limit); in adjust_lowmem_bounds()
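
adjust_lowmem_bounds() converts the virtual vmalloc_min into a physical ceiling in 64-bit arithmetic so the calculation cannot wrap when phys_addr_t is 32-bit, then caps arm_lowmem_limit at that ceiling and trims RAM above it. A standalone sketch with assumed addresses:

/*
 * Hedged sketch, not kernel code: (va - PAGE_OFFSET + PHYS_OFFSET) maps a
 * lowmem virtual address back to physical; RAM above the resulting limit is
 * either capped (HIGHMEM) or removed.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET     0xc0000000UL    /* illustrative */
#define PHYS_OFFSET     0x80000000ULL   /* illustrative RAM base */

int main(void)
{
        uint64_t vmalloc_min   = 0xf0000000UL;                  /* default from above */
        uint64_t vmalloc_limit = vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
        uint64_t ram_end       = PHYS_OFFSET + (1ULL << 31);    /* 2 GiB of RAM */
        uint64_t lowmem_limit  = ram_end < vmalloc_limit ? ram_end : vmalloc_limit;

        printf("vmalloc_limit:    0x%llx\n", (unsigned long long)vmalloc_limit);
        printf("arm_lowmem_limit: 0x%llx\n", (unsigned long long)lowmem_limit);
        printf("RAM above limit:  %llu MiB\n",
               (unsigned long long)((ram_end - lowmem_limit) >> 20));
        return 0;
}
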
1279 /* The XIP kernel is mapped in the module area -- skip over it */ in prepare_page_table()
1280 addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK; in prepare_page_table()
1323 * precious DMA-able memory... in arm_mm_memblock_reserve()
1325 memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); in arm_mm_memblock_reserve()
1362 map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; in devicemaps_init()
1386 * Create a mapping for the machine vectors at the high-vectors in devicemaps_init()
1387 * location (0xffff0000). If we aren't using high-vectors, also in devicemaps_init()
1388 * create a mapping at the low-vectors virtual address. in devicemaps_init()
1407 /* Now create a kernel read-only mapping */ in devicemaps_init()
1417 if (mdesc->map_io) in devicemaps_init()
1418 mdesc->map_io(); in devicemaps_init()
1429 * any write-allocated cache lines in the vector page are written in devicemaps_init()
1458 phys_addr_t start = reg->base; in map_lowmem()
1459 phys_addr_t end = start + reg->size; in map_lowmem()
1473 map.length = end - start; in map_lowmem()
1480 map.length = end - start; in map_lowmem()
1489 map.length = kernel_x_start - start; in map_lowmem()
1497 map.length = kernel_x_end - kernel_x_start; in map_lowmem()
1505 map.length = end - kernel_x_end; in map_lowmem()
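
The three map.length computations above show how map_lowmem() splits a memblock region that overlaps the kernel image into up to three create_mapping() calls, so only [kernel_x_start, kernel_x_end) stays executable. A standalone sketch of that split with illustrative addresses:

/*
 * Hedged sketch, not kernel code: split one RAM region around the executable
 * kernel text so only the middle piece is mapped RWX.
 */
#include <stdio.h>

int main(void)
{
        unsigned long start = 0x80000000, end = 0xa0000000;          /* one region */
        unsigned long kernel_x_start = 0x80100000;                   /* illustrative */
        unsigned long kernel_x_end   = 0x80900000;

        if (kernel_x_start > start)
                printf("RW : 0x%08lx len 0x%08lx\n", start, kernel_x_start - start);
        printf("RWX: 0x%08lx len 0x%08lx\n", kernel_x_start, kernel_x_end - kernel_x_start);
        if (kernel_x_end < end)
                printf("RW : 0x%08lx len 0x%08lx\n", kernel_x_end, end - kernel_x_end);
        return 0;
}
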
1531 if (!mdesc->pv_fixup) in early_paging_init()
1534 offset = mdesc->pv_fixup(); in early_paging_init()
1552 /* Re-set the phys pfn offset, and the pv offset */ in early_paging_init()
1558 (&__pv_table_end - &__pv_table_begin) << 2); in early_paging_init()
1566 * allocating into the caches too. Note that this is ARMv7 LPAE in early_paging_init()
1577 * Fixup the page tables - this must be in the idmap region as in early_paging_init()
1584 /* Re-enable the caches and cacheable TLB walks */ in early_paging_init()
1595 if (!mdesc->pv_fixup) in early_paging_init()
1598 offset = mdesc->pv_fixup(); in early_paging_init()
1613 unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1); in early_fixmap_shutdown()
1667 kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset); in paging_init()
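
paging_init() finally records kimage_voffset, the distance between a kernel symbol's virtual address and its identity-mapped address, by subtracting the two for one symbol. A standalone worked example with assumed addresses:

/*
 * Hedged sketch, not kernel code: the offset between a symbol's kernel VA
 * and its idmap (physical-equivalent) address, with illustrative values.
 */
#include <stdio.h>

int main(void)
{
        unsigned long sym_virt  = 0xc0a01234UL;     /* symbol's kernel VA (illustrative) */
        unsigned long sym_idmap = 0x80a01234UL;     /* same symbol via identity map      */

        printf("kimage_voffset = 0x%08lx\n", sym_virt - sym_idmap);    /* 0x40000000 */
        return 0;
}
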