// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual DMA allocation
 *
 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * 11/26/2000 -- disabled the existing code because it didn't work for
 * me in 2.4. Replaced with a significantly more primitive version
 * similar to the sun3 code. the old functionality was probably more
 * desirable, but....   -- Sam Creasey (sammy@oh.verio.com)
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>

#include <asm/sun3x.h>
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

/* IOMMU support */

#define IOMMU_ADDR_MASK         0x03ffe000
#define IOMMU_CACHE_INHIBIT     0x00000040
#define IOMMU_FULL_BLOCK        0x00000020
#define IOMMU_MODIFIED          0x00000010
#define IOMMU_USED              0x00000008
#define IOMMU_WRITE_PROTECT     0x00000004
#define IOMMU_DT_MASK           0x00000003
#define IOMMU_DT_INVALID        0x00000000
#define IOMMU_DT_VALID          0x00000001
#define IOMMU_DT_BAD            0x00000002


static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;


#define dvma_entry_paddr(index)         (iommu_pte[index] & IOMMU_ADDR_MASK)
#define dvma_entry_vaddr(index, paddr)  ((index << DVMA_PAGE_SHIFT) | \
                                         (paddr & (DVMA_PAGE_SIZE-1)))
#if 0
#define dvma_entry_set(index, addr)     (iommu_pte[index] = \
                                         (addr & IOMMU_ADDR_MASK) | \
                                         IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
#else
#define dvma_entry_set(index, addr)     (iommu_pte[index] = \
                                         (addr & IOMMU_ADDR_MASK) | \
                                         IOMMU_DT_VALID)
#endif
#define dvma_entry_clr(index)           (iommu_pte[index] = IOMMU_DT_INVALID)
#define dvma_entry_hash(addr)           ((addr >> DVMA_PAGE_SHIFT) ^ \
                                         ((addr & 0x03c00000) >> \
                                          (DVMA_PAGE_SHIFT+4)))
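/*
 * Each entry of the IOMMU table at SUN3X_IOMMU maps one DVMA page: the
 * target physical page lives in the bits covered by IOMMU_ADDR_MASK and
 * the low DT bits mark the entry invalid, valid or bad.
 *
 * Worked example (a sketch, assuming DVMA_PAGE_SHIFT is 13, i.e. 8K DVMA
 * pages, which is what IOMMU_ADDR_MASK above implies): bus address
 * 0x0002e040 selects entry 0x0002e040 >> 13 == 0x17;
 * dvma_entry_set(0x17, __pa(kaddr)) stores the page's physical address
 * together with IOMMU_DT_VALID, and dvma_entry_vaddr(0x17, paddr)
 * rebuilds 0x0002e000 | (paddr & 0x1fff).
 */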

#ifdef DEBUG
/* code to print out a dvma mapping for debugging purposes */
void dvma_print (unsigned long dvma_addr)
{

        unsigned long index;

        index = dvma_addr >> DVMA_PAGE_SHIFT;

        pr_info("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
                dvma_entry_paddr(index));
}
#endif


/* create a virtual mapping for a page assigned within the IOMMU
   so that the cpu can reach it easily */
inline int dvma_map_cpu(unsigned long kaddr,
                        unsigned long vaddr, int len)
{
        pgd_t *pgd;
        unsigned long end;
        int ret = 0;

        kaddr &= PAGE_MASK;
        vaddr &= PAGE_MASK;

        end = PAGE_ALIGN(vaddr + len);

        pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
        pgd = pgd_offset_k(vaddr);

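        /*
         * Walk (allocating where needed) the kernel page tables covering
         * [vaddr, end) and point each PTE at the corresponding page of
         * kaddr, so the CPU can reach the DVMA buffer through this kernel
         * virtual alias.
         */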
        do {
                pmd_t *pmd;
                unsigned long end2;

                if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

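                /* limit this pass to the range covered by the current pgd entry */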
                if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
                        end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
                else
                        end2 = end;

                do {
                        pte_t *pte;
                        unsigned long end3;

                        if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
                                ret = -ENOMEM;
                                goto out;
                        }

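                        /* likewise, stop at the end of the current pmd entry */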
                        if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
                                end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
                        else
                                end3 = end2;

                        do {
                                pr_debug("mapping %08lx phys to %08lx\n",
                                         __pa(kaddr), vaddr);
                                set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
                                                     PAGE_KERNEL));
                                pte++;
                                kaddr += PAGE_SIZE;
                                vaddr += PAGE_SIZE;
                        } while(vaddr < end3);

                } while(vaddr < end2);

        } while(vaddr < end);

        flush_tlb_all();

out:
        return ret;
}


inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
                          int len)
{
        unsigned long end, index;

        index = baddr >> DVMA_PAGE_SHIFT;
        end = ((baddr+len) >> DVMA_PAGE_SHIFT);

        if(len & ~DVMA_PAGE_MASK)
                end++;

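        /* point each IOMMU entry covering [baddr, baddr+len) at the
           corresponding physical page of the kernel buffer */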
        for(; index < end ; index++) {
//              if(dvma_entry_use(index))
//                      BUG();
//              pr_info("mapping pa %lx to ba %lx\n", __pa(kaddr),
//                      index << DVMA_PAGE_SHIFT);

                dvma_entry_set(index, __pa(kaddr));

                iommu_pte[index] |= IOMMU_FULL_BLOCK;
//              dvma_entry_inc(index);

                kaddr += DVMA_PAGE_SIZE;
        }

#ifdef DEBUG
        for(index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
                dvma_print(index << DVMA_PAGE_SHIFT);
#endif
        return 0;

}

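/*
 * Illustrative pairing (a hypothetical caller, not code from this file):
 * the generic sun3 DVMA layer is expected to set up a buffer roughly as
 *
 *      if (dvma_map_cpu(kaddr, vaddr, len) < 0)
 *              return 0;
 *      dvma_map_iommu(kaddr, baddr, len);
 *
 * so the same pages are reachable both from the CPU (via the kernel page
 * tables) and from a DVMA device (via the IOMMU table), and are torn down
 * on the device side with dvma_unmap_iommu() below.
 */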
void dvma_unmap_iommu(unsigned long baddr, int len)
{

        int index, end;


        index = baddr >> DVMA_PAGE_SHIFT;
        end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);

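        /* invalidate every IOMMU entry covering [baddr, baddr+len) so the
           device can no longer reach those pages */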
        for(; index < end ; index++) {
                pr_debug("freeing bus mapping %08x\n",
                         index << DVMA_PAGE_SHIFT);
#if 0
                if(!dvma_entry_use(index))
                        pr_info("dvma_unmap freeing unused entry %04x\n",
                                index);
                else
                        dvma_entry_dec(index);
#endif
                dvma_entry_clr(index);
        }

}