Lines Matching +full:data +full:- +full:size

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 */

int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;

/* in __ioremap_check_ram() */
	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	...
	for (i = 0; i < (stop_pfn - start_pfn); ++i)
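
	/*
	 * Worked example (editor's illustration, not part of ioremap.c):
	 * the start pfn is rounded up and the end pfn is truncated, so only
	 * pages fully contained in the resource count as System RAM. For a
	 * resource spanning 0x1800-0x37ff with 4 KiB pages:
	 *
	 *   start_pfn = (0x1800 + 0xfff) >> 12 = 2
	 *   stop_pfn  = (0x37ff + 1)     >> 12 = 3
	 *
	 * so only pfn 2 (0x2000-0x2fff) is checked; the partial pages at
	 * either end of the resource are ignored.
	 */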

/* in __ioremap_check_encrypted() */
	switch (res->desc) {

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
/* in __ioremap_check_other() */
	desc->flags |= IORES_MAP_ENCRYPTED;

/* in __ioremap_collect_map_flags() */
	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
		(IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));

static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	...
	end = start + size - 1;

/*
 * ...
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 * ...
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	...
	const unsigned long unaligned_size = size;
	...
	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;
	...
	__ioremap_check_mem(phys_addr, size, &io_desc);
	...
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
	...
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
	...
	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	...
	if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
		printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
			(unsigned long long)phys_addr,
			(unsigned long long)(phys_addr + size),
			pcm, new_pcm);
	...
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	...
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
	...
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
	...
	memtype_free(phys_addr, phys_addr + size);
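
	/*
	 * Worked example (editor's illustration): for an unaligned request
	 * such as phys_addr = 0xfed00004, size = 0x10, last_addr is
	 * 0xfed00013 and the page-alignment step above computes
	 *
	 *   offset    = 0xfed00004 & ~PAGE_MASK             = 0x004
	 *   phys_addr = 0xfed00004 &  PHYSICAL_PAGE_MASK    = 0xfed00000
	 *   size      = PAGE_ALIGN(0xfed00014) - 0xfed00000 = 0x1000
	 *
	 * so one whole page is mapped and the returned pointer is advanced
	 * by offset, matching the "non-page-aligned mappings" NOTE above.
	 */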

/**
 * ioremap - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 * ...
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	...
	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
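
/*
 * Minimal usage sketch (editor's illustration, not part of ioremap.c;
 * the MMIO base 0xfebf0000 and the STATUS register offset 0x04 below
 * are hypothetical). Assumes <linux/io.h>.
 */
static int example_probe(void)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap(0xfebf0000, 0x1000);	/* hypothetical device BAR */
	if (!regs)
		return -ENOMEM;

	status = readl(regs + 0x04);		/* hypothetical STATUS register */
	writel(status | 0x1, regs + 0x04);	/* hypothetical "ack" bit */

	iounmap(regs);
	return 0;
}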

/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 * ...
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * ...
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
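
/*
 * Sketch of the classic strongly-uncached case (editor's illustration,
 * not part of ioremap.c): the legacy VGA text buffer must be neither
 * cached nor write-combined, which is the situation ioremap_uc() is for.
 */
static void example_vga_putc(void)
{
	void __iomem *vga = ioremap_uc(0xb8000, 0x8000);

	if (vga) {
		/* 'A' (0x41) with grey-on-black attribute (0x07) */
		writew(0x0741, vga);
		iounmap(vga);
	}
}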

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 * ...
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0), false);
}
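
/*
 * Write-combining sketch (editor's illustration; fb_base and fb_size are
 * hypothetical): WC lets the CPU batch stores into larger bus bursts,
 * which is why framebuffers are the canonical ioremap_wc() users.
 */
static void example_clear_fb(resource_size_t fb_base, unsigned long fb_size)
{
	void __iomem *fb = ioremap_wc(fb_base, fb_size);

	if (fb) {
		memset_io(fb, 0, fb_size);	/* streaming writes benefit from WC */
		iounmap(fb);
	}
}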

/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * Write through stores data into memory while keeping the cache up-to-date.
 * ...
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0), false);
}
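
/*
 * Write-through sketch (editor's illustration; sram_base is
 * hypothetical): with WT, reads may be satisfied from the cache while
 * every store is pushed through to the device, so read-mostly persistent
 * memory such as battery-backed SRAM is a natural fit.
 */
static u32 example_bump_counter(resource_size_t sram_base)
{
	void __iomem *sram = ioremap_wt(sram_base, PAGE_SIZE);
	u32 val = 0;

	if (sram) {
		val = readl(sram) + 1;	/* read may hit the cache */
		writel(val, sram);	/* store goes through to the SRAM */
		iounmap(sram);
	}
	return val;
}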

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	...
	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * ...
	 */
	...
	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	...
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	...
	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * ...
	 */
	...
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {

/*
 * Examine the physical address to determine if it is EFI data. Check
 * ...
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	...
	/* Check if the address is part of EFI boot/runtime data */

/*
 * Examine the physical address to determine if it is boot data by checking
 * ...
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_data *data;
	...
		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);
		if (!data) {
			...
		}

		paddr_next = data->next;
		len = data->len;
		...
			memunmap(data);
		...
		if (data->type == SETUP_INDIRECT) {
			memunmap(data);
			data = memremap(paddr, sizeof(*data) + len,
					MEMREMAP_WB | MEMREMAP_DEC);
			if (!data) {
				...
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}
		...
		memunmap(data);
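
/*
 * For reference, the list walked above is the boot loader's setup_data
 * chain: a singly linked list of physical addresses whose nodes have the
 * following layout (from arch/x86/include/uapi/asm/bootparam.h). A
 * SETUP_INDIRECT node's payload is a setup_indirect that points at an
 * out-of-line buffer, which is why the walker remaps with the larger
 * sizeof(*data) + len.
 */
struct setup_data {
	__u64 next;	/* physical address of the next node, 0 ends the list */
	__u32 type;	/* SETUP_E820_EXT, SETUP_INDIRECT, ... */
	__u32 len;	/* length of the payload in data[] */
	__u8  data[];
};

struct setup_indirect {
	__u32 type;	/* SETUP_INDIRECT OR'd with the payload's real type */
	__u32 reserved;	/* must be zero */
	__u64 len;	/* length of the out-of-line payload */
	__u64 addr;	/* physical address of the out-of-line payload */
};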

/*
 * Examine the physical address to determine if it is boot data by checking
 * ...
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_data *data;
	...
		unsigned int len, size;	/* note: this local shadows the size parameter */
		...
		data = early_memremap_decrypted(paddr, sizeof(*data));
		if (!data) {
			...
		}

		size = sizeof(*data);

		paddr_next = data->next;
		len = data->len;
		...
			early_memunmap(data, sizeof(*data));
		...
		if (data->type == SETUP_INDIRECT) {
			size += len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap_decrypted(paddr, size);
			if (!data) {
				...
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}
		...
		early_memunmap(data, size);

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	...
	if (memremap_is_setup_data(phys_addr, size) ||
	    memremap_is_efi_data(phys_addr, size))
		return false;
	...
	return !memremap_should_map_decrypted(phys_addr, size);
}
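
/*
 * Caller's view (editor's illustration; the helper name is
 * hypothetical): memremap() consults this hook internally, so a caller
 * just asks for a cacheable mapping and the encrypted-vs-decrypted
 * decision for setup_data/EFI ranges is made transparently.
 */
static void *example_map_boot_data(resource_size_t paddr, unsigned long len)
{
	void *virt = memremap(paddr, len, MEMREMAP_WB);

	/* ... read the boot data through virt ... */
	return virt;	/* released later with memunmap(virt) */
}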

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	...
	if (early_memremap_is_setup_data(phys_addr, size) ||
	    memremap_is_efi_data(phys_addr, size))
		encrypted_prot = false;

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * ...
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	...
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * ...
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	...
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
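
/*
 * Early-boot sketch (editor's illustration; the helper is hypothetical):
 * peeking at a setup_data node before the normal ioremap machinery is
 * up, mapped decrypted because the boot loader wrote it unencrypted.
 */
static u32 __init example_peek_setup_data_type(u64 paddr)
{
	struct setup_data *data;
	u32 type = 0;

	data = early_memremap_decrypted(paddr, sizeof(*data));
	if (data) {
		type = data->type;
		early_memunmap(data, sizeof(*data));
	}
	return type;
}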

/* in early_ioremap_init() */
#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif
	...
	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
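
	/*
	 * Worked example (editor's illustration): with 4 KiB pages and
	 * PMD_SHIFT = 21, (1 << PMD_SHIFT) - 1 = 0x1fffff masks the offset
	 * within a 2 MiB region. The BUILD_BUG_ON/WARN_ON above therefore
	 * asserts that the top of the fixmap, fix_to_virt(0) + PAGE_SIZE,
	 * sits on a 2 MiB boundary, and the temporary __FIXADDR_TOP
	 * override lets the equivalent check be applied to the boot-ioremap
	 * (FIX_BTMAP) slots so they do not straddle a pmd.
	 */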