Lines Matching +full:iommu +full:- +full:map
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
17 #include <uapi/linux/iommu.h>
31 * if the IOMMU page table format is equivalent.
46 /* iommu fault flags */
62 #define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
65 #define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
68 * These are the possible domain-types
70 * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
72 * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
73 * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
75 * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
76 * This flag allows IOMMU drivers to implement
78 * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB
102 return domain->type & __IOMMU_DOMAIN_DMA_API; in iommu_is_dma_domain()
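The domain types above map onto a simple caller-side flow when IOMMU_DOMAIN_UNMANAGED is wanted: allocate a domain, attach the device, and manage mappings by hand through the IOMMU-API. A minimal sketch, assuming a client device dev behind an IOMMU; the example_* name, the IOVA value and the error handling are illustrative only.

#include <linux/iommu.h>
#include <linux/sizes.h>

static int example_use_unmanaged_domain(struct device *dev, phys_addr_t paddr)
{
        struct iommu_domain *domain;
        unsigned long iova = 0x100000;  /* IOVA chosen by the caller */
        int ret;

        domain = iommu_domain_alloc(dev->bus);  /* IOMMU_DOMAIN_UNMANAGED */
        if (!domain)
                return -ENODEV;

        ret = iommu_attach_device(domain, dev);
        if (ret)
                goto out_free;

        /* Mappings are fully managed by the IOMMU-API user. */
        ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                goto out_detach;

        /* ... device DMA to iova happens here ... */

        iommu_unmap(domain, iova, SZ_4K);
out_detach:
        iommu_detach_device(domain, dev);
out_free:
        iommu_domain_free(domain);
        return ret;
}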
107 IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
109 IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for
123 /* Arbitrary "never map this or give it to a device" address ranges */
127 /* Software-managed MSI translation window */
132 * struct iommu_resv_region - descriptor for a reserved memory region
136 * @prot: IOMMU Protection flags (READ/WRITE/...)
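For consumers, the reserved regions described above are retrieved with iommu_get_resv_regions() and walked as a plain list. A short sketch, with dev standing in for a client device:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/list.h>

static void example_dump_resv_regions(struct device *dev)
{
        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list)
                dev_info(dev, "reserved: start %pa size %zu prot 0x%x type %d\n",
                         &region->start, region->length, region->prot,
                         region->type);
        iommu_put_resv_regions(dev, &resv_regions);
}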
158 * enum iommu_dev_features - Per device IOMMU features
163 * Faults themselves instead of relying on the IOMMU. When
174 #define IOMMU_PASID_INVALID (-1U)
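The per-device features in this enum are switched on and off with iommu_dev_enable_feature()/iommu_dev_disable_feature(). A hedged sketch, assuming the device's IOMMU driver implements both IOPF and SVA:

#include <linux/iommu.h>

static int example_enable_sva(struct device *dev)
{
        int ret;

        /* I/O page fault support is usually a prerequisite for SVA. */
        ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
        if (ret)
                return ret;

        ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
        if (ret)
                iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);

        return ret;
}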
179 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
188 * ->unmap() function in struct iommu_ops before eventually being passed
189 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
190 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
191 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
192 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
203 * struct iommu_ops - iommu ops and capabilities
205 * @domain_alloc: allocate iommu domain
206 * @probe_device: Add device to iommu driver handling
207 * @release_device: Remove device from iommu driver handling
208 * @probe_finalize: Do final setup work after the device is added to an IOMMU
210 * @device_group: find iommu group for a particular device
212 * @of_xlate: add OF master IDs to iommu grouping
213 * @is_attach_deferred: Check if domain attach should be deferred from iommu
216 * iommu specific features.
222 * - IOMMU_DOMAIN_IDENTITY: must use an identity domain
223 * - IOMMU_DOMAIN_DMA: must use a dma domain
224 * - 0: use the default setting
232 /* Domain allocation and freeing by the iommu driver */
246 /* Per device IOMMU features */
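The three @def_domain_type return values listed above translate into a callback along these lines; my_device_needs_identity() is a made-up stand-in for whatever quirk detection a real driver performs.

#include <linux/iommu.h>

/* Hypothetical quirk check, standing in for real platform-specific logic. */
static bool my_device_needs_identity(struct device *dev)
{
        return false;
}

static int example_def_domain_type(struct device *dev)
{
        if (my_device_needs_identity(dev))
                return IOMMU_DOMAIN_IDENTITY;   /* must use an identity domain */

        return 0;       /* 0: let the core apply its default policy */
}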
267 * struct iommu_domain_ops - domain specific operations
268 * @attach_dev: attach an iommu domain to a device
269 * @detach_dev: detach an iommu domain from a device
270 * @map: map a physically contiguous memory region to an iommu domain
271 * @map_pages: map a physically contiguous set of pages of the same size to
272 * an iommu domain.
273 * @unmap: unmap a physically contiguous memory region from an iommu domain
274 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
276 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
281 * including no-snoop TLPs on PCIe or other platform
291 int (*map)(struct iommu_domain *domain, unsigned long iova, member
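The callbacks documented for struct iommu_domain_ops come together in a table like the one below. This is a skeletal sketch for a hypothetical driver, not a working implementation: the my_* functions are placeholders and only a subset of the callbacks is shown.

#include <linux/iommu.h>

static int my_map(struct iommu_domain *domain, unsigned long iova,
                  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        /* Install a physically contiguous mapping in the page table here. */
        return 0;
}

static size_t my_unmap(struct iommu_domain *domain, unsigned long iova,
                       size_t size, struct iommu_iotlb_gather *gather)
{
        /* Tear down the mapping and record the range for later invalidation. */
        iommu_iotlb_gather_add_range(gather, iova, size);
        return size;
}

static void my_iotlb_sync(struct iommu_domain *domain,
                          struct iommu_iotlb_gather *gather)
{
        /* Issue the gathered invalidations and wait for completion. */
}

static void my_flush_iotlb_all(struct iommu_domain *domain)
{
        /* Invalidate every cached translation for this domain. */
}

static void my_domain_free(struct iommu_domain *domain)
{
        /* Release the page tables and the domain itself. */
}

static const struct iommu_domain_ops example_domain_ops = {
        .map             = my_map,
        .unmap           = my_unmap,
        .iotlb_sync      = my_iotlb_sync,
        .flush_iotlb_all = my_flush_iotlb_all,
        .free            = my_domain_free,
};

With the split ops layout in this header, a driver would typically point struct iommu_ops::default_domain_ops at such a table, or install it in domain->ops when allocating the domain.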
320 * struct iommu_device - IOMMU core representation of one IOMMU hardware
322 * @list: Used by the iommu-core to keep a list of registered iommus
323 * @ops: iommu-ops for talking to this iommu
334 * struct iommu_fault_event - Generic fault event
348 * struct iommu_fault_param - per-device IOMMU fault data
349 * @handler: Callback function to handle IOMMU faults at device level
362 * struct dev_iommu - Collection of per-device IOMMU data
364 * @fault_param: IOMMU detected device fault reporting data
366 * @fwspec: IOMMU fwspec data
367 * @iommu_dev: IOMMU device this device is linked to
368 * @priv: IOMMU Driver private data
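The fault data above is delivered to handlers registered per device. A hedged sketch of registering one, assuming the fault-reporting interfaces declared in this header; the example_* names are illustrative and the handler does nothing beyond logging.

#include <linux/device.h>
#include <linux/iommu.h>

static int example_fault_handler(struct iommu_fault *fault, void *data)
{
        struct device *dev = data;

        if (fault->type == IOMMU_FAULT_PAGE_REQ)
                dev_dbg(dev, "recoverable page request fault\n");
        else
                dev_warn(dev, "unrecoverable DMA fault\n");

        /* A page request would normally be answered via iommu_page_response(). */
        return 0;
}

static int example_register_fault_handler(struct device *dev)
{
        return iommu_register_device_fault_handler(dev, example_fault_handler,
                                                   dev);
}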
382 int iommu_device_register(struct iommu_device *iommu,
385 void iommu_device_unregister(struct iommu_device *iommu);
386 int iommu_device_sysfs_add(struct iommu_device *iommu,
390 void iommu_device_sysfs_remove(struct iommu_device *iommu);
391 int iommu_device_link(struct iommu_device *iommu, struct device *link);
392 void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
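The registration calls above are typically used back to back in an IOMMU driver's probe path. A sketch for a hypothetical driver; struct my_iommu and my_iommu_ops are made up for illustration.

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical per-instance driver state. */
struct my_iommu {
        struct device *dev;             /* the IOMMU's own device */
        struct iommu_device iommu;      /* core representation registered below */
};

static const struct iommu_ops my_iommu_ops;     /* populated elsewhere by the driver */

static int example_register_iommu(struct my_iommu *smmu)
{
        int ret;

        ret = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
                                     "my-iommu.%s", dev_name(smmu->dev));
        if (ret)
                return ret;

        ret = iommu_device_register(&smmu->iommu, &my_iommu_ops, smmu->dev);
        if (ret)
                iommu_device_sysfs_remove(&smmu->iommu);

        return ret;
}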
404 .freelist = LIST_HEAD_INIT(gather->freelist), in iommu_iotlb_gather_init()
413 * within the IOMMU subsystem itself, so we should be able to trust in dev_iommu_ops()
416 return dev->iommu->iommu_dev->ops; in dev_iommu_ops()
505 if (domain->ops->flush_iotlb_all) in iommu_flush_iotlb_all()
506 domain->ops->flush_iotlb_all(domain); in iommu_flush_iotlb_all()
512 if (domain->ops->iotlb_sync) in iommu_iotlb_sync()
513 domain->ops->iotlb_sync(domain, iotlb_gather); in iommu_iotlb_sync()
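Callers batch invalidations through these wrappers with an iommu_iotlb_gather on the stack: unmap with iommu_unmap_fast(), then flush once with iommu_iotlb_sync(). A sketch; domain, iova and size are supplied by the caller.

#include <linux/iommu.h>

static size_t example_unmap_batched(struct iommu_domain *domain,
                                    unsigned long iova, size_t size)
{
        struct iommu_iotlb_gather gather;
        size_t unmapped;

        iommu_iotlb_gather_init(&gather);

        /* Tear down the mappings; the driver only gathers invalidations here. */
        unmapped = iommu_unmap_fast(domain, iova, size, &gather);

        /* Flush everything that was gathered in one go. */
        iommu_iotlb_sync(domain, &gather);

        return unmapped;
}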
519 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
525 * Helper for IOMMU drivers to check whether a new range and the gathered range
526 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
533 unsigned long start = iova, end = start + size - 1; in iommu_iotlb_gather_is_disjoint()
535 return gather->end != 0 && in iommu_iotlb_gather_is_disjoint()
536 (end + 1 < gather->start || start > gather->end + 1); in iommu_iotlb_gather_is_disjoint()
541 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
546 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
553 unsigned long end = iova + size - 1; in iommu_iotlb_gather_add_range()
555 if (gather->start > iova) in iommu_iotlb_gather_add_range()
556 gather->start = iova; in iommu_iotlb_gather_add_range()
557 if (gather->end < end) in iommu_iotlb_gather_add_range()
558 gather->end = end; in iommu_iotlb_gather_add_range()
562 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
563 * @domain: IOMMU domain to be invalidated
568 * Helper for IOMMU drivers to build invalidation commands based on individual
581 if ((gather->pgsize && gather->pgsize != size) || in iommu_iotlb_gather_add_page()
585 gather->pgsize = size; in iommu_iotlb_gather_add_page()
591 return gather && gather->queued; in iommu_iotlb_gather_queued()
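Inside a driver's unmap path the gather helpers above accumulate what needs invalidating, and iommu_iotlb_gather_queued() lets the driver skip that work when the flush will happen later anyway. A sketch of a hypothetical page-by-page unmap callback (the actual page-table manipulation is omitted):

#include <linux/iommu.h>
#include <linux/sizes.h>

static size_t example_unmap_cb(struct iommu_domain *domain, unsigned long iova,
                               size_t size, struct iommu_iotlb_gather *gather)
{
        size_t unmapped;

        /* Assumes size is a multiple of the 4K page size used here. */
        for (unmapped = 0; unmapped < size; unmapped += SZ_4K) {
                /* Clear one PTE here (page-table walk omitted in this sketch). */

                /* Record the page unless the flush is queued for later. */
                if (!iommu_iotlb_gather_queued(gather))
                        iommu_iotlb_gather_add_page(domain, gather,
                                                    iova + unmapped, SZ_4K);
        }

        return unmapped;
}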
598 /* FSL-MC device grouping function */
602 * struct iommu_fwspec - per-device IOMMU instance data
603 * @ops: ops for this device's IOMMU
604 * @iommu_fwnode: firmware handle for this device's IOMMU
607 * @ids: IDs which this device may present to the IOMMU
621 * struct iommu_sva - handle to a device-mm bond
635 if (dev->iommu) in dev_iommu_fwspec_get()
636 return dev->iommu->fwspec; in dev_iommu_fwspec_get()
644 dev->iommu->fwspec = fwspec; in dev_iommu_fwspec_set()
649 if (dev->iommu) in dev_iommu_priv_get()
650 return dev->iommu->priv; in dev_iommu_priv_get()
657 dev->iommu->priv = priv; in dev_iommu_priv_set()
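A driver usually pairs these accessors in its probe_device() callback: read the firmware-provided IDs from the fwspec, then hang per-device private data off the struct device. A sketch with made-up types; example_iommu stands in for the driver's registered struct iommu_device.

#include <linux/iommu.h>
#include <linux/slab.h>

/* Hypothetical per-client bookkeeping. */
struct my_client {
        u32 sid;                        /* first stream ID from the fwspec */
};

static struct iommu_device example_iommu;       /* registered elsewhere */

static struct iommu_device *example_probe_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct my_client *client;

        /* No firmware description means the device is not behind this IOMMU. */
        if (!fwspec || !fwspec->num_ids)
                return ERR_PTR(-ENODEV);

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);

        client->sid = fwspec->ids[0];
        dev_iommu_priv_set(dev, client);

        return &example_iommu;
}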
715 return -ENODEV; in iommu_attach_device()
731 return -ENODEV; in iommu_map()
738 return -ENODEV; in iommu_map_atomic()
758 return -ENODEV; in iommu_map_sg()
765 return -ENODEV; in iommu_map_sg_atomic()
800 return -ENODEV; in iommu_get_group_resv_regions()
819 return -ENODEV; in iommu_attach_group()
829 return ERR_PTR(-ENODEV); in iommu_group_alloc()
846 return -ENODEV; in iommu_group_set_name()
852 return -ENODEV; in iommu_group_add_device()
863 return -ENODEV; in iommu_group_for_each_dev()
880 return -ENODEV; in iommu_register_device_fault_handler()
891 return -ENODEV; in iommu_report_device_fault()
897 return -ENODEV; in iommu_page_response()
902 return -ENODEV; in iommu_group_id()
911 static inline int iommu_device_register(struct iommu_device *iommu, in iommu_device_register() argument
915 return -ENODEV; in iommu_device_register()
938 static inline void iommu_device_unregister(struct iommu_device *iommu) in iommu_device_unregister() argument
942 static inline int iommu_device_sysfs_add(struct iommu_device *iommu, in iommu_device_sysfs_add() argument
947 return -ENODEV; in iommu_device_sysfs_add()
950 static inline void iommu_device_sysfs_remove(struct iommu_device *iommu) in iommu_device_sysfs_remove() argument
956 return -EINVAL; in iommu_device_link()
967 return -ENODEV; in iommu_fwspec_init()
977 return -ENODEV; in iommu_fwspec_add_ids()
989 return -ENODEV; in iommu_dev_enable_feature()
995 return -ENODEV; in iommu_dev_disable_feature()
1030 return -ENODEV; in iommu_group_claim_dma_owner()
1044 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
1045 * @domain: The IOMMU domain to perform the mapping
1046 * @iova: The start address to map the buffer
1048 * @prot: IOMMU protection bits
1051 * stored in the given sg_table object in the provided IOMMU domain.
1056 return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot); in iommu_map_sgtable()
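A usage sketch for the wrapper above: build an sg_table from a page array and map it at a caller-chosen IOVA. The return and error conventions of iommu_map_sgtable()/iommu_map_sg() have shifted between kernel versions, so the negative-error check below is an assumption tied to the newer variants.

#include <linux/iommu.h>
#include <linux/scatterlist.h>

static int example_map_pages(struct iommu_domain *domain, unsigned long iova,
                             struct page **pages, unsigned int nr_pages)
{
        struct sg_table sgt;
        ssize_t mapped;
        int ret;

        ret = sg_alloc_table_from_pages(&sgt, pages, nr_pages, 0,
                                        (unsigned long)nr_pages * PAGE_SIZE,
                                        GFP_KERNEL);
        if (ret)
                return ret;

        mapped = iommu_map_sgtable(domain, iova, &sgt,
                                   IOMMU_READ | IOMMU_WRITE);
        sg_free_table(&sgt);

        /* Older kernels return 0 on failure rather than a negative error. */
        return mapped < 0 ? (int)mapped : 0;
}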
1088 return -ENODEV; in iommu_get_msi_cookie()