Lines Matching +full:linear +full:- +full:mapping +full:- +full:mode
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1995-2005 Russell King
23 #include <linux/dma-direct.h>
24 #include <linux/dma-map-ops.h>
38 #include <asm/kernel-pgtable.h>
47 #include <asm/xen/swiotlb-xen.h>
55 s64 memstart_addr __ro_after_init = -1;
60 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
61 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
62 * In such a case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
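
For illustration, a minimal user-space sketch (not kernel code) of how the zone boundaries described in this comment could relate when ZONE_DMA is restricted below 32 bits. The 30-bit limit, the 16 GiB DRAM size and all names are assumptions made for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Assumed layout: 30-bit ZONE_DMA (Raspberry Pi 4 style),
         * 32-bit ZONE_DMA32, DRAM ending at 16 GiB. */
        uint64_t zone_dma_end   = 1ULL << 30;
        uint64_t zone_dma32_end = 1ULL << 32;
        uint64_t dram_end       = 1ULL << 34;

        /* If ZONE_DMA already reaches 32 bits, ZONE_DMA32 ends up empty. */
        if (zone_dma_end >= zone_dma32_end)
                zone_dma32_end = zone_dma_end;

        printf("ZONE_DMA    [0x%09llx .. 0x%09llx)\n",
               0ULL, (unsigned long long)zone_dma_end);
        printf("ZONE_DMA32  [0x%09llx .. 0x%09llx)\n",
               (unsigned long long)zone_dma_end,
               (unsigned long long)zone_dma32_end);
        printf("ZONE_NORMAL [0x%09llx .. 0x%09llx)\n",
               (unsigned long long)zone_dma32_end,
               (unsigned long long)dram_end);
        return 0;
}
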
69 * reserve_crashkernel() - reserves memory for crash kernel
102 pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n", in reserve_crashkernel()
106 * The crashkernel memory will be removed from the kernel linear in reserve_crashkernel()
111 crashk_res.end = crash_base + crash_size - 1; in reserve_crashkernel()
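
A minimal sketch of the arithmetic behind the two fragments above, assuming a hypothetical 256 MB reservation at 2 GB. The inclusive resource end ("crash_base + crash_size - 1") is what goes into crashk_res.end; whether the pr_info prints the inclusive or exclusive end is not visible in the fragment, so the sketch prints both. User-space model code, not the kernel's reserve_crashkernel().

#include <stdint.h>
#include <stdio.h>

#define SZ_1M 0x100000ULL

int main(void)
{
        uint64_t crash_base = 0x80000000ULL;      /* assumed base          */
        uint64_t crash_size = 256 * SZ_1M;        /* assumed size (256 MB) */

        uint64_t region_end = crash_base + crash_size; /* exclusive end        */
        uint64_t res_end    = region_end - 1;          /* inclusive resource end */

        printf("crashkernel region: 0x%016llx - 0x%016llx (%llu MB)\n",
               (unsigned long long)crash_base,
               (unsigned long long)region_end,
               (unsigned long long)(crash_size / SZ_1M));
        printf("crashk_res.end    = 0x%016llx\n",
               (unsigned long long)res_end);
        return 0;
}
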
121 * limit. If DRAM starts above 32-bit, expand the zone to the maximum
122 * available memory, otherwise cap it at 32-bit.
134 return min(zone_mask, memblock_end_of_DRAM() - 1) + 1; in max_zone_phys()
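
The min() shown above clamps a zone's limit to the end of DRAM. Below is a minimal model of a max_zone_phys()-style helper with the comment's other case (DRAM starting above 32 bits) folded in; it is an illustrative reading of the comment, not the kernel's exact function, and all values are assumptions.

#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b)
{
        return a < b ? a : b;
}

/* zone_mask is e.g. (1 << 32) - 1 for a 32-bit zone. */
static uint64_t zone_limit(uint64_t zone_mask, uint64_t dram_start,
                           uint64_t dram_end)
{
        /* DRAM entirely above 32 bits: expand the zone to the maximum
         * available memory instead of leaving it empty (assumption based
         * on the comment above). */
        if (dram_start > 0xffffffffULL)
                zone_mask = UINT64_MAX;

        /* Otherwise cap the zone at the end of DRAM. */
        return min_u64(zone_mask, dram_end - 1) + 1;
}

int main(void)
{
        uint64_t dram_start = 0x80000000ULL;           /* assumed */
        uint64_t dram_end   = 0x0000004080000000ULL;   /* assumed */

        printf("32-bit zone limit: 0x%llx\n",
               (unsigned long long)zone_limit(0xffffffffULL,
                                              dram_start, dram_end));
        return 0;
}
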
231 s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual); in arm64_memblock_init()
234 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may in arm64_memblock_init()
235 * be limited in their ability to support a linear map that exceeds 51 in arm64_memblock_init()
238 * limit the kernel's linear map to 51 bits as well if we detect this in arm64_memblock_init()
243 pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n"); in arm64_memblock_init()
256 if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size) in arm64_memblock_init()
257 pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n"); in arm64_memblock_init()
261 * linear mapping. Take care not to clip the kernel which may be in arm64_memblock_init()
268 memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size, in arm64_memblock_init()
274 * If we are running with a 52-bit kernel VA config on a system that in arm64_memblock_init()
276 * memory in the 48-bit addressable part of the linear region, i.e., in arm64_memblock_init()
281 memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52); in arm64_memblock_init()
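
The fragments above size the linear region, cap it for KVM in nVHE mode, and move memstart_addr up when DRAM is larger than what the region can cover, so the end of DRAM stays mappable while memory below the new memstart_addr drops out of the linear map. A minimal model of that address arithmetic follows; the 1 GiB ARM64_MEMSTART_ALIGN value and the memory layout are assumptions, and the 52-bit-VA page-offset adjustment from the last fragment above is only noted, not modeled.

#include <stdint.h>
#include <stdio.h>

#define MEMSTART_ALIGN (1ULL << 30)     /* assumed ARM64_MEMSTART_ALIGN */

static uint64_t round_up_u64(uint64_t x, uint64_t align)
{
        return (x + align - 1) & ~(align - 1);
}

int main(void)
{
        /* Assumed layout: 48-bit linear region, DRAM ending 1 TiB above
         * what that region can cover, memstart_addr initially at 0. */
        uint64_t linear_region_size = 1ULL << 48;
        uint64_t dram_end           = (1ULL << 48) + (1ULL << 40);
        uint64_t memstart_addr      = 0;

        /* Optional cap to 51 bits for KVM in nVHE mode (see above). */
        if (linear_region_size > (1ULL << 51))
                linear_region_size = 1ULL << 51;

        if (dram_end - memstart_addr > linear_region_size) {
                /* Move memstart_addr up, suitably aligned, so the top of
                 * DRAM still fits inside the linear region. */
                memstart_addr = round_up_u64(dram_end - linear_region_size,
                                             MEMSTART_ALIGN);
        }

        printf("memstart_addr = 0x%llx\n",
               (unsigned long long)memstart_addr);
        return 0;
}
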
286 * via the linear mapping. in arm64_memblock_init()
290 memblock_add(__pa_symbol(_text), (u64)(_end - _text)); in arm64_memblock_init()
296 * initrd to become inaccessible via the linear mapping. in arm64_memblock_init()
297 * Otherwise, this is a no-op in arm64_memblock_init()
300 u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base; in arm64_memblock_init()
304 * with more memory than we can address via the linear mapping. in arm64_memblock_init()
313 "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) { in arm64_memblock_init()
327 s64 range = linear_region_size - in arm64_memblock_init()
331 * If the size of the linear region exceeds, by a sufficient in arm64_memblock_init()
333 * span, randomize the linear region as well. in arm64_memblock_init()
337 memstart_addr -= ARM64_MEMSTART_ALIGN * in arm64_memblock_init()
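
A minimal model of the randomization step above, assuming the linear region exceeds the physical span by some spare range: slide memstart_addr down by a random whole number of ARM64_MEMSTART_ALIGN units taken from that spare range. The 16-bit seed and the ">> 16" scaling are assumptions for this sketch; the fragments above only show the subtraction of a multiple of ARM64_MEMSTART_ALIGN.

#include <stdint.h>
#include <stdio.h>

#define MEMSTART_ALIGN (1ULL << 30)     /* assumed ARM64_MEMSTART_ALIGN */

int main(void)
{
        /* Assumed sizes: 52-bit linear region, 48-bit physical span. */
        uint64_t linear_region_size = 1ULL << 52;
        uint64_t phys_span          = 1ULL << 48;
        uint16_t seed               = 0xbeef;   /* would come from the boot RNG */
        int64_t  memstart_addr      = 0;

        int64_t range = (int64_t)(linear_region_size - phys_span);
        if (seed > 0 && range >= (int64_t)MEMSTART_ALIGN) {
                /* Scale the seed into [0, range) alignment units and slide
                 * memstart_addr down by that many units. */
                range /= (int64_t)MEMSTART_ALIGN;
                memstart_addr -= (int64_t)(MEMSTART_ALIGN *
                                 (((uint64_t)range * seed) >> 16));
        }

        printf("randomized memstart_addr = 0x%llx\n",
               (unsigned long long)(uint64_t)memstart_addr);
        return 0;
}
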
346 memblock_reserve(__pa_symbol(_stext), _end - _stext); in arm64_memblock_init()
355 high_memory = __va(memblock_end_of_DRAM() - 1) + 1; in arm64_memblock_init()
419 set_max_mapnr(max_pfn - PHYS_PFN_OFFSET); in mem_init()