// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/io-workarounds.h>
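
/*
 * Bottom of the kernel's ioremap area; the early ioremap path hands
 * out virtual space downward from the top of the area and tracks its
 * progress here.
 */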
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
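
/*
 * Standard MMIO mapping: cache-inhibited and guarded, going through
 * the I/O workaround machinery when the platform has it active.
 */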
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap);
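
/*
 * Write-combining variant: cache-inhibited but not guarded, so the
 * CPU may gather and reorder stores to the mapping.
 */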
void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap_wc);
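
/*
 * Cacheable mapping, for memory kept coherent with the CPU caches
 * (e.g. memory shared with a cache-coherent device).
 */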
void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
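
/*
 * Caller-supplied PTE flags, sanitized below so that user, exec and
 * clean-but-writeable permissions can never leak into an I/O mapping.
 */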
void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	pte_t pte = __pte(flags);
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	pte = pte_mkprivileged(pte);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
EXPORT_SYMBOL(ioremap_prot);
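
/*
 * Map a physically contiguous range one page at a time with
 * map_kernel_page(), for ioremaps performed before the vmalloc
 * allocator (and thus do_ioremap()) is available.
 */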
int early_ioremap_range(unsigned long ea, phys_addr_t pa,
			unsigned long size, pgprot_t prot)
{
	unsigned long i;

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_kernel_page(ea + i, pa + i, prot);

		if (WARN_ON_ONCE(err))  /* Should clean up */
			return err;
	}

	return 0;
}
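
/*
 * Reserve virtual space in the IOREMAP area and map @pa there.
 * @offset restores the intra-page offset of the original address in
 * the cookie returned to the caller.
 */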
void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
			 pgprot_t prot, void *caller)
{
	struct vm_struct *area;
	int ret;
	unsigned long va;

	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
	if (area == NULL)
		return NULL;

	area->phys_addr = pa;
	va = (unsigned long)area->addr;

	ret = ioremap_page_range(va, va + size, pa, prot);
	if (!ret)
		return (void __iomem *)area->addr + offset;

	unmap_kernel_range(va, size);
	free_vm_area(area);

	return NULL;
}