// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff

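/* Build a scatter-gather PTE from a CPU physical address: shifting
   right by PAGE_SHIFT-1 leaves the page frame number in bits 1 and
   up, and OR-ing in 1 sets the valid bit in bit 0.  */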
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */
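/* For example, with 1.5 GB of low memory this returns 2 GB when MAX
   is 2 GB (mem rounded up to a power of two), but 1 GB when MAX is
   1 GB (MAX is already the smaller value).  */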

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

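	/* One unsigned long PTE describes each page of the window, so the
	   page table itself occupies window_size / (PAGE_SIZE /
	   sizeof(unsigned long)) bytes.  */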
	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;


#ifdef CONFIG_DISCONTIGMEM

	arena = memblock_alloc_node(sizeof(*arena), align, nid);
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
		if (!arena)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*arena));
	}

	arena->ptes = memblock_alloc_node(mem_size, align, nid);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = memblock_alloc(mem_size, align);
		if (!arena->ptes)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, mem_size, align);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
	if (!arena)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*arena));
	arena->ptes = memblock_alloc(mem_size, align);
	if (!arena->ptes)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, mem_size, align);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %ps\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return DMA_MAPPING_ERROR;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
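	/* (Eight arena entries at the 8 KB Alpha page size span 64 KB.)  */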
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return DMA_MAPPING_ERROR;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev_is_pci(dev))
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}

static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == PCI_DMA_NONE);

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
		BUG();
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

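	/* Try a normal allocation first; the GFP_DMA retry below is only
	   taken when there is no scatter-gather iommu and the buffer does
	   not fit the device's dma mask.  */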
	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %ps\n",
			__builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
	      dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */
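/* For example, three physically contiguous entries are tagged 0, -1, -1
   and the first entry's dma_length becomes the sum of all three lengths
   (provided the total stays within the device's max segment size).  */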

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* We will not merge sg entries without a device.  */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

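	/* Keep only the offset within the first page; it is added back
	   into the bus address computed below.  */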
	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg = sg_next(sg);
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == PCI_DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != DMA_MAPPING_ERROR;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
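		/* A negative dma_address marks a follower that was merged
		   into the previous leader; only leaders get mapped.  */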
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, dir);
	return 0;
}

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	BUG_ON(dir == PCI_DMA_NONE);

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

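	/* Track the lowest and highest bus addresses actually freed so
	   that a single TLB invalidate below can cover them all.  */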
	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			      sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			      sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask covers either the
	   entire direct-mapped space or all of system memory as offset
	   by the map base, the direct map is sufficient.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}


/*
 * AGP GART extensions to the IOMMU
 */
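/* iommu_reserve() marks a run of arena ptes IOMMU_RESERVED_PTE,
   iommu_bind() points reserved entries at real pages, iommu_unbind()
   returns them to the reserved state, and iommu_release() hands the
   run back to the arena.  */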
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}

const struct dma_map_ops alpha_pci_ops = {
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.dma_supported		= alpha_pci_supported,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
};
EXPORT_SYMBOL(alpha_pci_ops);