// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/**
 * Low level function to establish the page tables for an IO mapping
 */
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
	int ret;
	unsigned long va = (unsigned long)ea;

	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	if ((ea + size) >= (void *)IOREMAP_END) {
		pr_warn("Outside the supported range\n");
		return NULL;
	}

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	if (slab_is_available()) {
		ret = ioremap_page_range(va, va + size, pa, prot);
		if (ret)
			unmap_kernel_range(va, size);
	} else {
		ret = early_ioremap_range(va, pa, size, prot);
	}

	if (ret)
		return NULL;

	return (void __iomem *)ea;
}
EXPORT_SYMBOL(__ioremap_at);
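
/*
 * Usage sketch (illustrative only; isa_phys_base is a made-up variable
 * standing for the bus physical base of a 64K ISA I/O window): a caller
 * that owns a fixed virtual window establishes the mapping like this:
 *
 *	void __iomem *va;
 *
 *	va = __ioremap_at(isa_phys_base, (void *)ISA_IO_BASE, 0x10000,
 *			  pgprot_noncached(PAGE_KERNEL));
 *	if (!va)
 *		return -ENXIO;
 */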

/**
 * Low level function to tear down the page tables for an IO mapping. This is
 * used for mappings that are manipulated manually, like partial unmapping of
 * PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
EXPORT_SYMBOL(__iounmap_at);
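
/*
 * Usage sketch (illustrative only; hose_io_virt_base and hose_io_size are
 * hypothetical, page-aligned values describing a host bridge's I/O window):
 * a window mapped with __ioremap_at() can be dropped again, e.g. when the
 * bridge goes away, without touching neighbouring mappings:
 *
 *	__iounmap_at(hose_io_virt_base, hose_io_size);
 */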
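
/**
 * Map @size bytes of physical memory at @addr for IO access. Page-aligns the
 * request, then maps it through the vmalloc area once the slab allocator is
 * available, or bolts it in at ioremap_bot during early boot. Returns the
 * virtual address (including the sub-page offset of @addr) or NULL on error.
 */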
void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
			       pgprot_t prot, void *caller)
{
	phys_addr_t paligned, offset;
	void __iomem *ret;
	int err;

	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	/*
	 * Choose an address to map it to. Once the vmalloc system is running,
	 * we use it. Before that, we map using addresses going up from
	 * ioremap_bot. vmalloc will use the addresses from IOREMAP_BASE
	 * through ioremap_bot.
	 */
	paligned = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;
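	/*
	 * For example, with 4K pages, addr = 0x3fff0042 and size = 0x100
	 * give paligned = 0x3fff0000, offset = 0x42 and a rounded-up size
	 * of 0x1000 (the numbers are purely illustrative).
	 */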

	if (size == 0 || paligned == 0)
		return NULL;

	if (slab_is_available())
		return do_ioremap(paligned, offset, size, prot, caller);

	err = early_ioremap_range(ioremap_bot, paligned, size, prot);
	if (err)
		return NULL;

	ret = (void __iomem *)ioremap_bot + offset;
	ioremap_bot += size;

	return ret;
}

/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to the IO memory should be serialized by the driver.
 */
void iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *)((unsigned long __force)PCI_FIX_ADDR(token) & PAGE_MASK);

	if ((unsigned long)addr < ioremap_bot) {
		pr_warn("Attempt to iounmap early bolted mapping at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}
EXPORT_SYMBOL(iounmap);
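
/*
 * Usage sketch (illustrative only; res stands for a hypothetical
 * struct resource describing a device's MMIO region): drivers pair
 * iounmap() with ioremap() in the usual way:
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */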