Lines Matching +full:iommu +full:- +full:map
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
9 * See Documentation/core-api/dma-api-howto.rst for the interface specification.
29 #include <linux/iommu-helper.h>
34 #include <linux/dma-direct.h>
35 #include <linux/dma-map-ops.h>
38 #include <asm/iommu.h>
52 * If this is disabled the IOMMU will use an optimized flushing strategy
103 if (offset == -1) { in alloc_iommu()
109 if (offset != -1) { in alloc_iommu()
150 /* Debugging aid for drivers that don't free their IOMMU tables */
167 * Ran out of IOMMU space for this operation. This is very bad. in iommu_full()
176 dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size); in iommu_full()
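These fragments are the failure path taken when alloc_iommu() finds no free run in the aperture bitmap. On the driver side the condition surfaces as a failed mapping that must be checked with dma_mapping_error(); a minimal sketch of that check (queue_buffer() is a hypothetical caller, not part of this file):

    #include <linux/dma-mapping.h>

    static int queue_buffer(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;         /* aperture exhausted; back off */

            /* ... hand 'handle' to the hardware, and when it is done: */
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }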
194 /* Map a single contiguous physical area into the IOMMU.
195 * Caller needs to check whether the IOMMU is needed and flush.
208 if (iommu_page == -1) { in dma_map_area()
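dma_map_area() is the allocate-then-fill core of the driver: reserve a run of aperture pages, write one GART PTE per page, and hand back a bus address inside the aperture. A condensed sketch of that pattern; every name below (gart_bitmap, gart_pages, gart_table, gart_bus_base, make_gpte()) is a stand-in, and the PTE layout is invented for illustration:

    #include <linux/bitmap.h>
    #include <linux/dma-mapping.h>
    #include <linux/kernel.h>

    static unsigned long *gart_bitmap;      /* one bit per aperture page */
    static unsigned long gart_pages;        /* aperture size in pages */
    static u32 *gart_table;                 /* remap table, one PTE per page */
    static dma_addr_t gart_bus_base;        /* aperture base bus address */

    static inline u32 make_gpte(phys_addr_t p)
    {
            return (p >> PAGE_SHIFT) | 1;   /* hypothetical PTE format */
    }

    static dma_addr_t sketch_map_area(phys_addr_t phys, size_t size)
    {
            unsigned long npages = DIV_ROUND_UP(size, PAGE_SIZE);
            unsigned long page, i;

            page = bitmap_find_next_zero_area(gart_bitmap, gart_pages,
                                              0, npages, 0);
            if (page >= gart_pages)         /* the "== -1" case above */
                    return DMA_MAPPING_ERROR;
            bitmap_set(gart_bitmap, page, npages);

            for (i = 0; i < npages; i++)    /* one PTE per covered page */
                    gart_table[page + i] = make_gpte(phys + i * PAGE_SIZE);

            return gart_bus_base + (page << PAGE_SHIFT);
    }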
224 /* Map a single area into the IOMMU */
265 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; in gart_unmap_page()
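gart_unmap_page() inverts that arithmetic: subtract the aperture's bus base and shift right by PAGE_SHIFT to recover the first aperture page index. A worked example with assumed values:

    /* Assumed: aperture at bus address 0xe0000000, 4 KiB pages (shift 12). */
    dma_addr_t dma_addr       = 0xe0005000;
    dma_addr_t iommu_bus_base = 0xe0000000;
    unsigned long iommu_page  = (dma_addr - iommu_bus_base) >> 12;  /* == 5 */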
283 if (!s->dma_length || !s->length) in gart_unmap_sg()
285 gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0); in gart_unmap_sg()
303 if (nonforced_iommu(dev, addr, s->length)) { in dma_map_sg_nonforce()
304 addr = dma_map_area(dev, addr, s->length, dir, 0); in dma_map_sg_nonforce()
313 s->dma_address = addr; in dma_map_sg_nonforce()
314 s->dma_length = s->length; in dma_map_sg_nonforce()
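The nonforce path remaps a buffer only when the device cannot reach it directly; anything already addressable passes through with its physical address, which is why s->dma_address may hold either kind of value. A sketch of the test, assuming a plain DMA-mask comparison stands in for need_iommu()/nonforced_iommu():

    static bool sketch_need_iommu(struct device *dev, phys_addr_t addr,
                                  size_t size)
    {
            /* past the device's reachable range -> must go through the GART */
            return addr + size - 1 > *dev->dma_mask;
    }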
321 /* Map multiple scatterlist entries contiguously into the first. */
331 if (iommu_start == -1) in __dma_map_cont()
332 return -ENOMEM; in __dma_map_cont()
336 unsigned long phys_addr = s->dma_address; in __dma_map_cont()
338 BUG_ON(s != start && s->offset); in __dma_map_cont()
340 sout->dma_address = iommu_bus_base; in __dma_map_cont()
341 sout->dma_address += iommu_page*PAGE_SIZE + s->offset; in __dma_map_cont()
342 sout->dma_length = s->length; in __dma_map_cont()
344 sout->dma_length += s->length; in __dma_map_cont()
348 pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE); in __dma_map_cont()
349 while (pages--) { in __dma_map_cont()
355 BUG_ON(iommu_page - iommu_start != pages); in __dma_map_cont()
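__dma_map_cont() makes several scatterlist entries look like one range to the device: only the first output entry receives a bus address, every later entry just grows that entry's dma_length, and the backing GART pages stay consecutive. A sketch of that accounting alone (gart_bus_base is the same assumed global as above; the real function also writes the PTEs):

    #include <linux/scatterlist.h>

    static void sketch_merge_sg(struct scatterlist *start, int nelems,
                                unsigned long iommu_page)
    {
            struct scatterlist *s;
            int i;

            for_each_sg(start, s, nelems, i) {
                    if (i == 0) {
                            s->dma_address = gart_bus_base +
                                    (iommu_page << PAGE_SHIFT) + s->offset;
                            s->dma_length = s->length;
                    } else {
                            start->dma_length += s->length;
                    }
            }
    }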
366 sout->dma_address = start->dma_address; in dma_map_cont()
367 sout->dma_length = start->length; in dma_map_cont()
374 * DMA map all entries in a scatterlist.
387 return -EINVAL; in gart_map_sg()
400 s->dma_address = addr; in gart_map_sg()
401 BUG_ON(s->length == 0); in gart_map_sg()
403 nextneed = need_iommu(dev, addr, s->length); in gart_map_sg()
412 if (!iommu_merge || !nextneed || !need || s->offset || in gart_map_sg()
413 (s->length + seg_size > max_seg_size) || in gart_map_sg()
414 (ps->offset + ps->length) % PAGE_SIZE) { in gart_map_sg()
415 ret = dma_map_cont(dev, start_sg, i - start, in gart_map_sg()
429 seg_size += s->length; in gart_map_sg()
431 pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE); in gart_map_sg()
434 ret = dma_map_cont(dev, start_sg, i - start, sgmap, pages, need); in gart_map_sg()
441 sgmap->dma_length = 0; in gart_map_sg()
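All of this is reached through the generic DMA API. A usage sketch (program_descriptor() is a hypothetical device helper): dma_map_sg() may return fewer entries than it was handed once merging happens, so the driver iterates over the returned count but unmaps with the original nents:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int sketch_map_and_program(struct device *dev,
                                      struct scatterlist *sglist, int nents)
    {
            struct scatterlist *sg;
            int i, count;

            count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
            if (count == 0)
                    return -ENOMEM;

            for_each_sg(sglist, sg, count, i)       /* count, not nents */
                    program_descriptor(sg_dma_address(sg), sg_dma_len(sg));

            dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE); /* nents again */
            return 0;
    }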
462 /* Allocate and map a coherent buffer */
471 !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24)) in gart_alloc_coherent()
475 DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1); in gart_alloc_coherent()
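gart_alloc_coherent() allocates from the direct allocator and remaps the result through the aperture only when the IOMMU is forced and the coherent mask is wide enough; callers never see the difference and simply use dma_alloc_coherent(). A minimal usage sketch:

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    static int sketch_use_ring(struct device *dev)
    {
            dma_addr_t bus;
            void *ring = dma_alloc_coherent(dev, SZ_64K, &bus, GFP_KERNEL);

            if (!ring)
                    return -ENOMEM;
            /* 'bus' is what the device dereferences, 'ring' the CPU view */
            dma_free_coherent(dev, SZ_64K, ring, bus);
            return 0;
    }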
507 iommu_size -= round_up(a, PMD_PAGE_SIZE) - a; in check_iommu_size()
510 pr_warn("PCI-DMA: Warning: Small IOMMU %luMB." in check_iommu_size()
546 struct pci_dev *dev = node_to_amd_nb(i)->misc; in enable_gart_translations()
551 /* Flush the GART-TLB to remove stale entries */ in enable_gart_translations()
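enable_gart_translations() programs each northbridge's misc PCI device through config space. A sketch of the read-modify-write pattern; the register offset and enable bit below are hypothetical (the driver's real constants come from asm/amd_nb.h):

    #include <linux/pci.h>

    #define MY_GART_CTL     0x90            /* hypothetical offset */
    #define MY_GART_EN      BIT(0)          /* hypothetical enable bit */

    static void sketch_enable_gart(struct pci_dev *nb)
    {
            u32 ctl;

            pci_read_config_dword(nb, MY_GART_CTL, &ctl);
            ctl |= MY_GART_EN;
            pci_write_config_dword(nb, MY_GART_CTL, ctl);
    }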
580 pr_info("PCI-DMA: Restoring GART aperture settings\n"); in gart_fixup_northbridges()
583 struct pci_dev *dev = node_to_amd_nb(i)->misc; in gart_fixup_northbridges()
587 * step. Restore the pre-suspend aperture settings. in gart_fixup_northbridges()
596 pr_info("PCI-DMA: Resuming GART IOMMU\n"); in gart_resume()
620 pr_info("PCI-DMA: Disabling AGP.\n"); in init_amd_gatt()
622 aper_size = aper_base = info->aper_size = 0; in init_amd_gatt()
625 dev = node_to_amd_nb(i)->misc; in init_amd_gatt()
640 info->aper_base = aper_base; in init_amd_gatt()
641 info->aper_size = aper_size >> 20; in init_amd_gatt()
657 pr_info("PCI-DMA: aperture base @ %x size %u KB\n", in init_amd_gatt()
664 pr_warn("PCI-DMA: More than 4GB of RAM and no IOMMU - falling back to iommu=soft.\n"); in init_amd_gatt()
665 return -1; in init_amd_gatt()
698 dev = node_to_amd_nb(i)->misc; in gart_iommu_shutdown()
733 pr_warn("More than 4GB of memory but GART IOMMU not available.\n"); in gart_iommu_init()
734 pr_warn("falling back to iommu=soft.\n"); in gart_iommu_init()
739 /* need to map that range */ in gart_iommu_init()
749 pr_info("PCI-DMA: using GART IOMMU.\n"); in gart_iommu_init()
756 panic("Cannot allocate iommu bitmap\n"); in gart_iommu_init()
758 pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n", in gart_iommu_init()
762 iommu_start = aper_size - iommu_size; in gart_iommu_init()
767 * Unmap the IOMMU part of the GART. The alias of the page is in gart_iommu_init()
782 * do an explicit, full-scale wbinvd() _after_ having marked all in gart_iommu_init()
783 * the pages as Not-Present: in gart_iommu_init()
803 panic("Cannot allocate iommu scratch page"); in gart_iommu_init()
828 /* duplicated from pci-dma.c */ in gart_parse_options()
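gart_parse_options() handles the boot options this code shares with pci-dma.c (hence the comment). A sketch of the usual early_param() mechanism, with a hypothetical option name and flag:

    #include <linux/init.h>
    #include <linux/string.h>

    static int my_fullflush __initdata;     /* hypothetical flag */

    static int __init sketch_gart_setup(char *str)
    {
            if (str && !strcmp(str, "fullflush"))
                    my_fullflush = 1;
            return 0;
    }
    early_param("my_gart", sketch_gart_setup);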