Lines Matching full:iommu

21 #include <linux/iommu-helper.h>
23 #include <linux/amd-iommu.h>
37 #include <asm/iommu.h>
42 #include "../dma-iommu.h"
73 * general struct to manage commands sent to an IOMMU
122 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu) in get_dev_table() argument
125 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_dev_table()
151 /* Writes the specific IOMMU for a device into the PCI segment rlookup table */
152 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid) in amd_iommu_set_rlookup_table() argument
154 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in amd_iommu_set_rlookup_table()
156 pci_seg->rlookup_table[devid] = iommu; in amd_iommu_set_rlookup_table()
185 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid) in alloc_dev_data() argument
188 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in alloc_dev_data()
202 static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid) in search_dev_data() argument
206 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in search_dev_data()
222 struct amd_iommu *iommu; in clone_alias() local
229 iommu = rlookup_amd_iommu(&pdev->dev); in clone_alias()
230 if (!iommu) in clone_alias()
233 amd_iommu_set_rlookup_table(iommu, alias); in clone_alias()
234 dev_table = get_dev_table(iommu); in clone_alias()
242 static void clone_aliases(struct amd_iommu *iommu, struct device *dev) in clone_aliases() argument
255 clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL); in clone_aliases()
260 static void setup_aliases(struct amd_iommu *iommu, struct device *dev) in setup_aliases() argument
263 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in setup_aliases()
279 clone_aliases(iommu, dev); in setup_aliases()
282 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid) in find_dev_data() argument
286 dev_data = search_dev_data(iommu, devid); in find_dev_data()
289 dev_data = alloc_dev_data(iommu, devid); in find_dev_data()
293 if (translation_pre_enabled(iommu)) in find_dev_data()
301 * Find or create an IOMMU group for an acpihid device.
352 struct amd_iommu *iommu; in check_device() local
363 iommu = rlookup_amd_iommu(dev); in check_device()
364 if (!iommu) in check_device()
368 pci_seg = iommu->pci_seg; in check_device()
375 static int iommu_init_device(struct amd_iommu *iommu, struct device *dev) in iommu_init_device() argument
388 dev_data = find_dev_data(iommu, devid); in iommu_init_device()
393 setup_aliases(iommu, dev); in iommu_init_device()
403 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
411 static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev) in iommu_ignore_device() argument
413 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in iommu_ignore_device()
414 struct dev_table_entry *dev_table = get_dev_table(iommu); in iommu_ignore_device()
425 setup_aliases(iommu, dev); in iommu_ignore_device()
453 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid) in dump_dte_entry() argument
456 struct dev_table_entry *dev_table = get_dev_table(iommu); in dump_dte_entry()
471 static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event) in amd_iommu_report_rmp_hw_error() argument
483 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_hw_error()
495 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_hw_error()
503 static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event) in amd_iommu_report_rmp_fault() argument
516 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_rmp_fault()
528 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_rmp_fault()
542 static void amd_iommu_report_page_fault(struct amd_iommu *iommu, in amd_iommu_report_page_fault() argument
549 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), in amd_iommu_report_page_fault()
575 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in amd_iommu_report_page_fault()
584 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
586 struct device *dev = iommu->iommu.dev; in iommu_print_event()
612 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags); in iommu_print_event()
619 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
621 dump_dte_entry(iommu, devid); in iommu_print_event()
626 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
631 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
644 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
649 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
653 amd_iommu_report_rmp_fault(iommu, event); in iommu_print_event()
656 amd_iommu_report_rmp_hw_error(iommu, event); in iommu_print_event()
662 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
673 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
677 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
678 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
681 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
685 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
688 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) in iommu_handle_ppr_entry() argument
699 fault.sbdf = PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, PPR_DEVID(raw[0])); in iommu_handle_ppr_entry()
706 static void iommu_poll_ppr_log(struct amd_iommu *iommu) in iommu_poll_ppr_log() argument
710 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
713 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
714 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
721 raw = (u64 *)(iommu->ppr_log + head); in iommu_poll_ppr_log()
746 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
749 iommu_handle_ppr_entry(iommu, entry); in iommu_poll_ppr_log()
752 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
753 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
768 static void iommu_poll_ga_log(struct amd_iommu *iommu) in iommu_poll_ga_log() argument
772 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
775 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
776 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
782 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
790 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
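The three log pollers above (iommu_poll_events(), iommu_poll_ppr_log(), iommu_poll_ga_log()) share one consumer pattern: read the head and tail pointers from the MMIO registers, walk the entries between them, then write the new head back so the hardware can reuse the space. A minimal userspace model of that pattern follows; the entry size, buffer length, and struct below are illustrative stand-ins, not the driver's values.

    /*
     * Simplified model of the head/tail log draining seen above.
     * LOG_ENTRY_SIZE and LOG_BUF_SIZE are illustrative, not hardware values.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define LOG_ENTRY_SIZE 16u
    #define LOG_BUF_SIZE   (8u * LOG_ENTRY_SIZE)

    struct log_regs { uint32_t head, tail; };  /* stands in for the MMIO head/tail registers */

    static void poll_log(struct log_regs *regs, const uint8_t *buf)
    {
            uint32_t head = regs->head;        /* readl(... MMIO_*_HEAD_OFFSET) */
            uint32_t tail = regs->tail;        /* readl(... MMIO_*_TAIL_OFFSET) */

            while (head != tail) {
                    const uint8_t *entry = buf + head;   /* decode/report one entry */
                    printf("entry at offset %u starts with 0x%02x\n", head, entry[0]);
                    head = (head + LOG_ENTRY_SIZE) % LOG_BUF_SIZE;
            }

            regs->head = head;                 /* writel(head, ... MMIO_*_HEAD_OFFSET) */
    }

    int main(void)
    {
            uint8_t buf[LOG_BUF_SIZE];
            struct log_regs regs = { .head = 0, .tail = 2 * LOG_ENTRY_SIZE };

            memset(buf, 0xab, sizeof(buf));
            poll_log(&regs, buf);
            return 0;
    }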
812 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) in amd_iommu_set_pci_msi_domain() argument
818 dev_set_msi_domain(dev, iommu->msi_domain); in amd_iommu_set_pci_msi_domain()
823 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } in amd_iommu_set_pci_msi_domain() argument
834 struct amd_iommu *iommu = (struct amd_iommu *) data; in amd_iommu_int_thread() local
835 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
840 iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
843 pr_devel("Processing IOMMU Event Log\n"); in amd_iommu_int_thread()
844 iommu_poll_events(iommu); in amd_iommu_int_thread()
848 pr_devel("Processing IOMMU PPR Log\n"); in amd_iommu_int_thread()
849 iommu_poll_ppr_log(iommu); in amd_iommu_int_thread()
854 pr_devel("Processing IOMMU GA Log\n"); in amd_iommu_int_thread()
855 iommu_poll_ga_log(iommu); in amd_iommu_int_thread()
860 pr_info_ratelimited("IOMMU event log overflow\n"); in amd_iommu_int_thread()
861 amd_iommu_restart_event_logging(iommu); in amd_iommu_int_thread()
872 * Workaround: The IOMMU driver should read back the in amd_iommu_int_thread()
877 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
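amd_iommu_int_thread() above keeps working until the status register is clean: it reads MMIO_STATUS_OFFSET, acknowledges the bits it saw, drains each log whose bit was set, and then re-reads the status register (the workaround noted above) in case new entries arrived while it was processing. A compressed model of that loop follows; the bit values and helper names here are illustrative, not the driver's.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit positions; the real masks come from the driver headers. */
    #define STATUS_EVT_LOG (1u << 0)
    #define STATUS_PPR_LOG (1u << 1)
    #define STATUS_GA_LOG  (1u << 2)
    #define STATUS_ANY_LOG (STATUS_EVT_LOG | STATUS_PPR_LOG | STATUS_GA_LOG)

    /* Fake status register: pending work on the first read, clean afterwards. */
    static uint32_t read_status(void)
    {
            static int reads;
            return reads++ ? 0 : (STATUS_EVT_LOG | STATUS_PPR_LOG);
    }

    static void ack_status(uint32_t bits) { printf("ack status bits 0x%x\n", bits); }
    static void poll_events(void)         { printf("drain event log\n"); }
    static void poll_ppr(void)            { printf("drain PPR log\n"); }
    static void poll_ga(void)             { printf("drain GA log\n"); }

    int main(void)
    {
            uint32_t status = read_status();

            while (status & STATUS_ANY_LOG) {
                    ack_status(status & STATUS_ANY_LOG);
                    if (status & STATUS_EVT_LOG)
                            poll_events();
                    if (status & STATUS_PPR_LOG)
                            poll_ppr();
                    if (status & STATUS_GA_LOG)
                            poll_ga();
                    /* Re-read, as in the workaround above, so entries that arrived
                     * while the logs were being drained are not left unhandled. */
                    status = read_status();
            }
            return 0;
    }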
889 * IOMMU command queuing functions
893 static int wait_on_sem(struct amd_iommu *iommu, u64 data) in wait_on_sem() argument
897 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
910 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
917 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
918 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
922 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
924 /* Tell the IOMMU about it */ in copy_cmd_to_buffer()
925 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
929 struct amd_iommu *iommu, in build_completion_wait() argument
932 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
1090 static int __iommu_queue_command_sync(struct amd_iommu *iommu, in __iommu_queue_command_sync() argument
1097 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1099 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1113 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1119 copy_cmd_to_buffer(iommu, cmd); in __iommu_queue_command_sync()
1122 iommu->need_sync = sync; in __iommu_queue_command_sync()
1127 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
1134 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1135 ret = __iommu_queue_command_sync(iommu, cmd, sync); in iommu_queue_command_sync()
1136 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1141 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
1143 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
1148 * buffer of an IOMMU
1150 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
1157 if (!iommu->need_sync) in iommu_completion_wait()
1160 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1162 data = ++iommu->cmd_sem_val; in iommu_completion_wait()
1163 build_completion_wait(&cmd, iommu, data); in iommu_completion_wait()
1165 ret = __iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
1169 ret = wait_on_sem(iommu, data); in iommu_completion_wait()
1172 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
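iommu_completion_wait() above pairs a queued COMPLETION_WAIT command with a polled memory semaphore: the sequence number iommu->cmd_sem_val is bumped under iommu->lock, a command that makes the hardware store that value to iommu->cmd_sem is queued, and wait_on_sem() spins (bounded by LOOP_TIMEOUT) until the value shows up. A stripped-down model of the handshake, with the hardware store simulated directly; the names and the timeout value are illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LOOP_TIMEOUT 100000UL

    struct fake_iommu {
            volatile uint64_t cmd_sem;  /* memory the hardware writes on completion */
            uint64_t cmd_sem_val;       /* last sequence number handed out */
    };

    /* Stand-in for queueing the COMPLETION_WAIT command; this "hardware"
     * completes immediately by storing the sequence number. */
    static void queue_completion_wait(struct fake_iommu *iommu, uint64_t data)
    {
            iommu->cmd_sem = data;
    }

    static bool wait_on_sem(struct fake_iommu *iommu, uint64_t data)
    {
            unsigned long i = 0;

            while (iommu->cmd_sem != data && i < LOOP_TIMEOUT)
                    ++i;                /* the driver relaxes the CPU between polls */

            return iommu->cmd_sem == data;
    }

    static bool completion_wait(struct fake_iommu *iommu)
    {
            /* bumped under iommu->lock in the driver */
            uint64_t data = ++iommu->cmd_sem_val;

            queue_completion_wait(iommu, data);
            return wait_on_sem(iommu, data);
    }

    int main(void)
    {
            struct fake_iommu iommu = { 0, 0 };

            printf("completion wait %s\n", completion_wait(&iommu) ? "done" : "timed out");
            return 0;
    }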
1177 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1183 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
1186 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) in amd_iommu_flush_dte_all() argument
1189 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_dte_all()
1192 iommu_flush_dte(iommu, devid); in amd_iommu_flush_dte_all()
1194 iommu_completion_wait(iommu); in amd_iommu_flush_dte_all()
1201 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) in amd_iommu_flush_tlb_all() argument
1204 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_tlb_all()
1210 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_all()
1213 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_all()
1216 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) in amd_iommu_flush_tlb_domid() argument
1222 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_domid()
1224 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_domid()
1227 static void amd_iommu_flush_all(struct amd_iommu *iommu) in amd_iommu_flush_all() argument
1233 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_all()
1234 iommu_completion_wait(iommu); in amd_iommu_flush_all()
1237 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1243 iommu_queue_command(iommu, &cmd); in iommu_flush_irt()
1246 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) in amd_iommu_flush_irt_all() argument
1249 u16 last_bdf = iommu->pci_seg->last_bdf; in amd_iommu_flush_irt_all()
1252 iommu_flush_irt(iommu, devid); in amd_iommu_flush_irt_all()
1254 iommu_completion_wait(iommu); in amd_iommu_flush_irt_all()
1257 void iommu_flush_all_caches(struct amd_iommu *iommu) in iommu_flush_all_caches() argument
1259 if (iommu_feature(iommu, FEATURE_IA)) { in iommu_flush_all_caches()
1260 amd_iommu_flush_all(iommu); in iommu_flush_all_caches()
1262 amd_iommu_flush_dte_all(iommu); in iommu_flush_all_caches()
1263 amd_iommu_flush_irt_all(iommu); in iommu_flush_all_caches()
1264 amd_iommu_flush_tlb_all(iommu); in iommu_flush_all_caches()
1274 struct amd_iommu *iommu; in device_flush_iotlb() local
1279 iommu = rlookup_amd_iommu(dev_data->dev); in device_flush_iotlb()
1280 if (!iommu) in device_flush_iotlb()
1285 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
1290 struct amd_iommu *iommu = data; in device_flush_dte_alias() local
1292 return iommu_flush_dte(iommu, alias); in device_flush_dte_alias()
1300 struct amd_iommu *iommu; in device_flush_dte() local
1306 iommu = rlookup_amd_iommu(dev_data->dev); in device_flush_dte()
1307 if (!iommu) in device_flush_dte()
1315 device_flush_dte_alias, iommu); in device_flush_dte()
1317 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1321 pci_seg = iommu->pci_seg; in device_flush_dte()
1324 ret = iommu_flush_dte(iommu, alias); in device_flush_dte()
1338 * page. Otherwise it flushes the whole TLB of the IOMMU.
1354 * Devices of this domain are behind this IOMMU in __domain_flush_pages()
1429 * Devices of this domain are behind this IOMMU in amd_iommu_domain_flush_complete()
1465 * allocated for every IOMMU as the default domain. If device isolation
1538 static void set_dte_entry(struct amd_iommu *iommu, u16 devid, in set_dte_entry() argument
1544 struct dev_table_entry *dev_table = get_dev_table(iommu); in set_dte_entry()
1555 * When SNP is enabled, only set TV bit when IOMMU in set_dte_entry()
1567 if (iommu_feature(iommu, FEATURE_EPHSUP)) in set_dte_entry()
1613 amd_iommu_flush_tlb_domid(iommu, old_domid); in set_dte_entry()
1617 static void clear_dte_entry(struct amd_iommu *iommu, u16 devid) in clear_dte_entry() argument
1619 struct dev_table_entry *dev_table = get_dev_table(iommu); in clear_dte_entry()
1629 amd_iommu_apply_erratum_63(iommu, devid); in clear_dte_entry()
1635 struct amd_iommu *iommu; in do_attach() local
1638 iommu = rlookup_amd_iommu(dev_data->dev); in do_attach()
1639 if (!iommu) in do_attach()
1648 domain->dev_iommu[iommu->index] += 1; in do_attach()
1656 set_dte_entry(iommu, dev_data->devid, domain, in do_attach()
1658 clone_aliases(iommu, dev_data->dev); in do_attach()
1666 struct amd_iommu *iommu; in do_detach() local
1668 iommu = rlookup_amd_iommu(dev_data->dev); in do_detach()
1669 if (!iommu) in do_detach()
1675 clear_dte_entry(iommu, dev_data->devid); in do_detach()
1676 clone_aliases(iommu, dev_data->dev); in do_detach()
1688 domain->dev_iommu[iommu->index] -= 1; in do_detach()
1794 * left the caches in the IOMMU dirty. So we have to flush in attach_device()
1855 struct amd_iommu *iommu; in amd_iommu_probe_device() local
1861 iommu = rlookup_amd_iommu(dev); in amd_iommu_probe_device()
1862 if (!iommu) in amd_iommu_probe_device()
1866 if (!iommu->iommu.ops) in amd_iommu_probe_device()
1870 return &iommu->iommu; in amd_iommu_probe_device()
1872 ret = iommu_init_device(iommu, dev); in amd_iommu_probe_device()
1877 iommu_ignore_device(iommu, dev); in amd_iommu_probe_device()
1879 amd_iommu_set_pci_msi_domain(dev, iommu); in amd_iommu_probe_device()
1880 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
1883 iommu_completion_wait(iommu); in amd_iommu_probe_device()
1897 struct amd_iommu *iommu; in amd_iommu_release_device() local
1902 iommu = rlookup_amd_iommu(dev); in amd_iommu_release_device()
1903 if (!iommu) in amd_iommu_release_device()
1907 iommu_completion_wait(iommu); in amd_iommu_release_device()
1929 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); in update_device_table() local
1931 if (!iommu) in update_device_table()
1933 set_dte_entry(iommu, dev_data->devid, domain, in update_device_table()
1935 clone_aliases(iommu, dev_data->dev); in update_device_table()
1957 * The following functions belong to the exported interface of AMD IOMMU
1959 * This interface allows access to lower level functions of the IOMMU
2052 * Force IOMMU v1 page table when iommu=pt and in protection_domain_alloc()
2134 struct amd_iommu *iommu; in amd_iommu_detach_device() local
2142 iommu = rlookup_amd_iommu(dev); in amd_iommu_detach_device()
2143 if (!iommu) in amd_iommu_detach_device()
2152 iommu_completion_wait(iommu); in amd_iommu_detach_device()
2160 struct amd_iommu *iommu; in amd_iommu_attach_device() local
2169 iommu = rlookup_amd_iommu(dev); in amd_iommu_attach_device()
2170 if (!iommu) in amd_iommu_attach_device()
2187 iommu_completion_wait(iommu); in amd_iommu_attach_device()
2233 * AMD's IOMMU can flush as many pages as necessary in a single flush. in amd_iommu_iotlb_gather_add_page()
2238 * hypervisor needs to synchronize the host IOMMU PTEs with those of in amd_iommu_iotlb_gather_add_page()
2301 struct amd_iommu *iommu; in amd_iommu_get_resv_regions() local
2310 iommu = rlookup_amd_iommu(dev); in amd_iommu_get_resv_regions()
2311 if (!iommu) in amd_iommu_get_resv_regions()
2313 pci_seg = iommu->pci_seg; in amd_iommu_get_resv_regions()
2440 * The next functions do a basic initialization of IOMMU for pass through
2443 * In passthrough mode the IOMMU is initialized and enabled but not used for
2538 * IOMMU TLB needs to be flushed before Device TLB to in __flush_pasid()
2539 * prevent device TLB refill from IOMMU TLB in __flush_pasid()
2550 /* Wait until IOMMU TLB flushes are complete */ in __flush_pasid()
2555 struct amd_iommu *iommu; in __flush_pasid() local
2566 iommu = rlookup_amd_iommu(dev_data->dev); in __flush_pasid()
2567 if (!iommu) in __flush_pasid()
2572 ret = iommu_queue_command(iommu, &cmd); in __flush_pasid()
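The ordering called out in the __flush_pasid() comments above is the point of the loop that follows them: the IOMMU's own TLB entries for the PASID are invalidated and the driver waits for those flushes to complete before it queues the per-device IOTLB invalidations, otherwise a device could refill its IOTLB from stale IOMMU TLB entries. A schematic of that ordering, with placeholder primitives standing in for the real queue/wait commands:

    #include <stdio.h>

    /* Placeholder primitives; in the driver these queue the IOMMU and device
     * TLB invalidation commands and wait on command completion. */
    static void queue_iommu_tlb_flush(int pasid)  { printf("IOMMU TLB flush, pasid %d\n", pasid); }
    static void wait_for_iommu_flushes(void)      { printf("wait for IOMMU flush completion\n"); }
    static void queue_device_iotlb_flush(int dev, int pasid)
    {
            printf("device %d IOTLB flush, pasid %d\n", dev, pasid);
    }

    /* Ordering from the __flush_pasid() comments: IOMMU TLB first, wait,
     * then the per-device IOTLBs, so no device refills from stale entries. */
    static void flush_pasid_ordered(const int *devs, int ndev, int pasid)
    {
            queue_iommu_tlb_flush(pasid);
            wait_for_iommu_flushes();
            for (int i = 0; i < ndev; i++)
                    queue_device_iotlb_flush(devs[i], pasid);
    }

    int main(void)
    {
            int devs[] = { 0, 1 };

            flush_pasid_ordered(devs, 2, 42);
            return 0;
    }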
2726 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
2730 iommu = rlookup_amd_iommu(&pdev->dev); in amd_iommu_complete_ppr()
2731 if (!iommu) in amd_iommu_complete_ppr()
2737 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()
2794 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid, in set_dte_irq_entry() argument
2798 struct dev_table_entry *dev_table = get_dev_table(iommu); in set_dte_irq_entry()
2810 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid) in get_irq_table() argument
2813 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in get_irq_table()
2816 "%s: no iommu for devid %x:%x\n", in get_irq_table()
2852 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, in set_remap_table_entry() argument
2855 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in set_remap_table_entry()
2858 set_dte_irq_entry(iommu, devid, table); in set_remap_table_entry()
2859 iommu_flush_dte(iommu, devid); in set_remap_table_entry()
2867 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev); in set_remap_table_entry_alias() local
2869 if (!iommu) in set_remap_table_entry_alias()
2872 pci_seg = iommu->pci_seg; in set_remap_table_entry_alias()
2874 set_dte_irq_entry(iommu, alias, table); in set_remap_table_entry_alias()
2880 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu, in alloc_irq_table() argument
2891 pci_seg = iommu->pci_seg; in alloc_irq_table()
2899 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
2917 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
2928 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
2931 set_remap_table_entry(iommu, alias, table); in alloc_irq_table()
2934 iommu_completion_wait(iommu); in alloc_irq_table()
2946 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count, in alloc_irq_index() argument
2953 table = alloc_irq_table(iommu, devid, pdev); in alloc_irq_index()
2965 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
2975 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
2992 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, in modify_irte_ga() argument
3000 table = get_irq_table(iommu, devid); in modify_irte_ga()
3025 iommu_flush_irt(iommu, devid); in modify_irte_ga()
3026 iommu_completion_wait(iommu); in modify_irte_ga()
3031 static int modify_irte(struct amd_iommu *iommu, in modify_irte() argument
3037 table = get_irq_table(iommu, devid); in modify_irte()
3045 iommu_flush_irt(iommu, devid); in modify_irte()
3046 iommu_completion_wait(iommu); in modify_irte()
3051 static void free_irte(struct amd_iommu *iommu, u16 devid, int index) in free_irte() argument
3056 table = get_irq_table(iommu, devid); in free_irte()
3061 iommu->irte_ops->clear_allocated(table, index); in free_irte()
3064 iommu_flush_irt(iommu, devid); in free_irte()
3065 iommu_completion_wait(iommu); in free_irte()
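modify_irte(), modify_irte_ga() and free_irte() above all follow the same update sequence: look up the device's remap table, change (or clear) the entry under the table lock, then flush the IOMMU's cached interrupt remap entries for that device ID and wait for the flush to complete. A condensed model of that sequence; the table type, lock, and flush helper below are simplified stand-ins, not driver structures.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_IRTE 256

    /* Simplified stand-in for the per-device interrupt remap table. */
    struct remap_table {
            pthread_mutex_t lock;
            uint64_t entry[MAX_IRTE];
    };

    /* Placeholder for iommu_flush_irt() followed by iommu_completion_wait(). */
    static void flush_irt_and_wait(uint16_t devid)
    {
            printf("flush IRT cache for devid 0x%04x and wait\n", devid);
    }

    /* Same shape as modify_irte()/free_irte(): update under the table lock,
     * then invalidate the IOMMU's cached copy of the entry. */
    static void update_irte(struct remap_table *table, uint16_t devid,
                            int index, uint64_t new_entry)
    {
            pthread_mutex_lock(&table->lock);
            table->entry[index] = new_entry;
            pthread_mutex_unlock(&table->lock);

            flush_irt_and_wait(devid);
    }

    int main(void)
    {
            static struct remap_table table = { .lock = PTHREAD_MUTEX_INITIALIZER };

            update_irte(&table, 0x00a5, 3, 0);  /* clearing an entry, as free_irte() does */
            return 0;
    }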
3098 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_activate() argument
3103 modify_irte(iommu, devid, index, irte); in irte_activate()
3106 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_activate() argument
3111 modify_irte_ga(iommu, devid, index, irte, NULL); in irte_ga_activate()
3114 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_deactivate() argument
3119 modify_irte(iommu, devid, index, irte); in irte_deactivate()
3122 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index) in irte_ga_deactivate() argument
3127 modify_irte_ga(iommu, devid, index, irte, NULL); in irte_ga_deactivate()
3130 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_set_affinity() argument
3137 modify_irte(iommu, devid, index, irte); in irte_set_affinity()
3140 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index, in irte_ga_set_affinity() argument
3151 modify_irte_ga(iommu, devid, index, irte, NULL); in irte_ga_set_affinity()
3239 struct amd_iommu *iommu = data->iommu; in irq_remapping_prepare_irte() local
3241 if (!iommu) in irq_remapping_prepare_irte()
3246 iommu->irte_ops->prepare(data->entry, apic->delivery_mode, in irq_remapping_prepare_irte()
3290 struct amd_iommu *iommu; in irq_remapping_alloc() local
3314 iommu = __rlookup_amd_iommu(seg, devid); in irq_remapping_alloc()
3315 if (!iommu) in irq_remapping_alloc()
3325 table = alloc_irq_table(iommu, devid, NULL); in irq_remapping_alloc()
3334 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3345 index = alloc_irq_index(iommu, devid, nr_irqs, align, in irq_remapping_alloc()
3348 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL); in irq_remapping_alloc()
3380 data->iommu = iommu; in irq_remapping_alloc()
3397 free_irte(iommu, devid, index + i); in irq_remapping_alloc()
3416 free_irte(data->iommu, irte_info->devid, irte_info->index); in irq_remapping_free()
3424 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3434 struct amd_iommu *iommu = data->iommu; in irq_remapping_activate() local
3437 if (!iommu) in irq_remapping_activate()
3440 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid, in irq_remapping_activate()
3442 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); in irq_remapping_activate()
3451 struct amd_iommu *iommu = data->iommu; in irq_remapping_deactivate() local
3453 if (iommu) in irq_remapping_deactivate()
3454 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid, in irq_remapping_deactivate()
3461 struct amd_iommu *iommu; in irq_remapping_select() local
3474 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff)); in irq_remapping_select()
3476 return iommu && iommu->ir_domain == d; in irq_remapping_select()
3509 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_activate_guest_mode()
3539 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, in amd_iommu_deactivate_guest_mode()
3553 if (ir_data->iommu == NULL) in amd_ir_set_vcpu_affinity()
3556 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3601 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, in amd_ir_update_irte() argument
3611 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid, in amd_ir_update_irte()
3623 struct amd_iommu *iommu = ir_data->iommu; in amd_ir_set_affinity() local
3626 if (!iommu) in amd_ir_set_affinity()
3633 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); in amd_ir_set_affinity()
3659 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
3663 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
3666 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
3667 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
3672 iommu->ir_domain->parent = arch_get_ir_parent_domain(); in amd_iommu_create_irq_domain()
3673 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, in amd_iommu_create_irq_domain()
3675 iommu->index); in amd_iommu_create_irq_domain()
3682 struct amd_iommu *iommu; in amd_iommu_update_ga() local
3693 iommu = ir_data->iommu; in amd_iommu_update_ga()
3694 if (!iommu) in amd_iommu_update_ga()
3697 table = get_irq_table(iommu, devid); in amd_iommu_update_ga()
3716 iommu_flush_irt(iommu, devid); in amd_iommu_update_ga()
3717 iommu_completion_wait(iommu); in amd_iommu_update_ga()