Lines matching refs: iommu (AMD IOMMU driver)
276 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in find_dev_data() local
285 if (translation_pre_enabled(iommu)) in find_dev_data()
397 struct amd_iommu *iommu; in iommu_init_device() local
399 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
400 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
573 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
575 struct device *dev = iommu->iommu.dev; in iommu_print_event()
662 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
666 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
667 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
670 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
674 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
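The matches above are the driver's event-log handler. The event log is a ring buffer in system memory: the hardware advances the tail register as it writes entries, the driver advances the head as it consumes them (666-674), then publishes the new head through MMIO_EVT_HEAD_OFFSET. A minimal user-space model of that consume loop; the buffer and entry sizes are stand-ins, not necessarily the driver's values:

#include <stdio.h>
#include <stdint.h>

#define EVT_BUFFER_SIZE   8192    /* stand-in value */
#define EVENT_ENTRY_SIZE  16      /* stand-in value */

/* Consume entries between our head and the device-owned tail. */
static void poll_events_model(uint32_t *head, uint32_t tail)
{
        while (*head != tail) {
                /* the real loop calls iommu_print_event(iommu, evt_buf + head) */
                printf("decode event at offset %u\n", (unsigned)*head);
                *head = (*head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
        }
        /* the driver then writes *head back to MMIO_EVT_HEAD_OFFSET */
}

int main(void)
{
        uint32_t head = 0;

        poll_events_model(&head, 3 * EVENT_ENTRY_SIZE);
        return 0;
}
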
677 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) in iommu_handle_ppr_entry() argument
695 static void iommu_poll_ppr_log(struct amd_iommu *iommu) in iommu_poll_ppr_log() argument
699 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
702 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
703 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
710 raw = (u64 *)(iommu->ppr_log + head); in iommu_poll_ppr_log()
735 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
738 iommu_handle_ppr_entry(iommu, entry); in iommu_poll_ppr_log()
741 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
742 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
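Note the ordering visible in the PPR matches: the raw entry is copied out of the log (710), the head is advanced and written back (735) so the IOMMU can reuse the slot, and only then is the entry handled (738); head and tail are re-read afterwards (741-742) because servicing a page request can outlast several new log writes. A user-space sketch of that ordering; sizes are stand-ins:

#include <stdint.h>
#include <string.h>

#define PPR_LOG_SIZE    8192   /* stand-in value */
#define PPR_ENTRY_SIZE  16     /* PPR log entries are 128 bits */

static void handle_ppr(const uint64_t *entry) { (void)entry; }

static void poll_ppr_model(uint8_t *log, uint32_t *head, const uint32_t *tail)
{
        while (*head != *tail) {
                uint64_t entry[2];

                /* 1. copy the raw entry out of the ring */
                memcpy(entry, log + *head, sizeof(entry));
                /* 2. free the slot first: advance head and (in the driver)
                 *    write it back, so the IOMMU can keep logging */
                *head = (*head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
                /* 3. only now handle the request; the driver re-reads
                 *    head and tail afterwards */
                handle_ppr(entry);
        }
}
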
757 static void iommu_poll_ga_log(struct amd_iommu *iommu) in iommu_poll_ga_log() argument
761 if (iommu->ga_log == NULL) in iommu_poll_ga_log()
764 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
765 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); in iommu_poll_ga_log()
771 raw = (u64 *)(iommu->ga_log + head); in iommu_poll_ga_log()
779 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); in iommu_poll_ga_log()
801 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) in amd_iommu_set_pci_msi_domain() argument
807 dev_set_msi_domain(dev, iommu->msi_domain); in amd_iommu_set_pci_msi_domain()
812 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } in amd_iommu_set_pci_msi_domain() argument
822 struct amd_iommu *iommu = (struct amd_iommu *) data; in amd_iommu_int_thread() local
823 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
828 iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
832 iommu_poll_events(iommu); in amd_iommu_int_thread()
837 iommu_poll_ppr_log(iommu); in amd_iommu_int_thread()
843 iommu_poll_ga_log(iommu); in amd_iommu_int_thread()
860 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
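amd_iommu_int_thread() is driven by the status register: it acknowledges the interrupt bits by writing them back (the register is write-1-to-clear, line 828), dispatches to the pollers, then re-reads status (860) because new work may have arrived meanwhile. A user-space model; the bit positions below are approximations of the driver's MMIO_STATUS_*_INT_MASK constants, not authoritative values:

#include <stdint.h>

#define STATUS_EVT_INT  (1u << 1)    /* approximate bit positions */
#define STATUS_PPR_INT  (1u << 6)
#define STATUS_GA_INT   (1u << 10)
#define STATUS_ANY      (STATUS_EVT_INT | STATUS_PPR_INT | STATUS_GA_INT)

static void int_thread_model(volatile uint32_t *status_reg)
{
        uint32_t status = *status_reg;

        while (status & STATUS_ANY) {
                /* write-1-to-clear: ack what we are about to handle */
                *status_reg = status & STATUS_ANY;

                if (status & STATUS_EVT_INT)
                        ;   /* iommu_poll_events() */
                if (status & STATUS_PPR_INT)
                        ;   /* iommu_poll_ppr_log() */
                if (status & STATUS_GA_INT)
                        ;   /* iommu_poll_ga_log() */

                /* hardware may have raised new bits while we polled */
                status = *status_reg;
        }
}
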
876 static int wait_on_sem(struct amd_iommu *iommu, u64 data) in wait_on_sem() argument
880 while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) { in wait_on_sem()
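wait_on_sem() is a bounded spin on the completion-wait semaphore. A standalone model; the LOOP_TIMEOUT value here is illustrative, and the real loop delays briefly between reads and returns -EIO on timeout:

#include <stdint.h>

#define LOOP_TIMEOUT 100000   /* illustrative bound */

static int wait_on_sem_model(volatile uint64_t *sem, uint64_t data)
{
        int i = 0;

        while (*sem != data && i < LOOP_TIMEOUT) {
                /* the driver udelay()s here between reads */
                i += 1;
        }
        if (i == LOOP_TIMEOUT)
                return -1;    /* the driver warns and returns -EIO */
        return 0;
}
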
893 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
900 tail = iommu->cmd_buf_tail; in copy_cmd_to_buffer()
901 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
905 iommu->cmd_buf_tail = tail; in copy_cmd_to_buffer()
908 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
912 struct amd_iommu *iommu, in build_completion_wait() argument
915 u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); in build_completion_wait()
1064 static int __iommu_queue_command_sync(struct amd_iommu *iommu, in __iommu_queue_command_sync() argument
1071 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1073 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; in __iommu_queue_command_sync()
1087 iommu->cmd_buf_head = readl(iommu->mmio_base + in __iommu_queue_command_sync()
1093 copy_cmd_to_buffer(iommu, cmd); in __iommu_queue_command_sync()
1096 iommu->need_sync = sync; in __iommu_queue_command_sync()
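The arithmetic at 1071-1073 is the classic power-of-two ring free-space check: unsigned subtraction wraps modulo 2^32, and reducing modulo CMD_BUFFER_SIZE leaves the forward distance from next_tail to head. When the result is too small, line 1087 refreshes cmd_buf_head from the hardware head register and the check is retried. A runnable worked example; the sizes are the usual power-of-two choices, used here as stand-ins:

#include <stdio.h>
#include <stdint.h>

#define CMD_BUFFER_SIZE 8192   /* power of two */
#define CMD_ENTRY_SIZE  16     /* stand-in for sizeof(struct iommu_cmd) */

int main(void)
{
        uint32_t head = 32, tail = 8176;   /* example ring offsets */
        uint32_t next_tail = (tail + CMD_ENTRY_SIZE) % CMD_BUFFER_SIZE;
        uint32_t left = (head - next_tail) % CMD_BUFFER_SIZE;

        /* next_tail wraps to 0; (32 - 0) % 8192 = 32 bytes still free.
         * The wrap-around of unsigned subtraction makes this correct
         * for every head/tail combination. */
        printf("next_tail=%u left=%u\n", (unsigned)next_tail, (unsigned)left);
        return 0;
}
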
1101 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
1108 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
1109 ret = __iommu_queue_command_sync(iommu, cmd, sync); in iommu_queue_command_sync()
1110 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
1115 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
1117 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
1124 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
1131 if (!iommu->need_sync) in iommu_completion_wait()
1134 raw_spin_lock_irqsave(&iommu->lock, flags); in iommu_completion_wait()
1136 data = ++iommu->cmd_sem_val; in iommu_completion_wait()
1137 build_completion_wait(&cmd, iommu, data); in iommu_completion_wait()
1139 ret = __iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
1143 ret = wait_on_sem(iommu, data); in iommu_completion_wait()
1146 raw_spin_unlock_irqrestore(&iommu->lock, flags); in iommu_completion_wait()
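iommu_completion_wait() implements a ticket handshake: under the command-buffer lock it takes a fresh value from cmd_sem_val (1136), queues a COMPLETION_WAIT command that makes the hardware store that value to cmd_sem's physical address, then spins in wait_on_sem() until the store lands. A minimal model with the device side simulated inline:

#include <stdint.h>

struct sem_model {
        volatile uint64_t cmd_sem;   /* written by the device */
        uint64_t cmd_sem_val;        /* monotonically increasing ticket */
};

static void completion_wait_model(struct sem_model *m)
{
        uint64_t data = ++m->cmd_sem_val;   /* fresh ticket, under the lock */

        /* __iommu_queue_command_sync(COMPLETION_WAIT(data)) goes here;
         * simulate the IOMMU completing that command: */
        m->cmd_sem = data;

        while (m->cmd_sem != data)
                ;                            /* wait_on_sem(iommu, data) */
}
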
1151 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1157 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
1160 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) in amd_iommu_flush_dte_all() argument
1165 iommu_flush_dte(iommu, devid); in amd_iommu_flush_dte_all()
1167 iommu_completion_wait(iommu); in amd_iommu_flush_dte_all()
1174 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) in amd_iommu_flush_tlb_all() argument
1182 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_all()
1185 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_all()
1188 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) in amd_iommu_flush_tlb_domid() argument
1194 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_tlb_domid()
1196 iommu_completion_wait(iommu); in amd_iommu_flush_tlb_domid()
1199 static void amd_iommu_flush_all(struct amd_iommu *iommu) in amd_iommu_flush_all() argument
1205 iommu_queue_command(iommu, &cmd); in amd_iommu_flush_all()
1206 iommu_completion_wait(iommu); in amd_iommu_flush_all()
1209 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1215 iommu_queue_command(iommu, &cmd); in iommu_flush_irt()
1218 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) in amd_iommu_flush_irt_all() argument
1223 iommu_flush_irt(iommu, devid); in amd_iommu_flush_irt_all()
1225 iommu_completion_wait(iommu); in amd_iommu_flush_irt_all()
1228 void iommu_flush_all_caches(struct amd_iommu *iommu) in iommu_flush_all_caches() argument
1230 if (iommu_feature(iommu, FEATURE_IA)) { in iommu_flush_all_caches()
1231 amd_iommu_flush_all(iommu); in iommu_flush_all_caches()
1233 amd_iommu_flush_dte_all(iommu); in iommu_flush_all_caches()
1234 amd_iommu_flush_irt_all(iommu); in iommu_flush_all_caches()
1235 amd_iommu_flush_tlb_all(iommu); in iommu_flush_all_caches()
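The control flow of iommu_flush_all_caches() reconstructed from the matched lines (kernel-style sketch, not standalone-compilable): FEATURE_IA advertises support for a single invalidate-all command; without it, the caches are flushed class by class.

void iommu_flush_all_caches(struct amd_iommu *iommu)
{
        if (iommu_feature(iommu, FEATURE_IA)) {
                amd_iommu_flush_all(iommu);      /* one INVALIDATE_ALL */
        } else {
                amd_iommu_flush_dte_all(iommu);  /* device table entries */
                amd_iommu_flush_irt_all(iommu);  /* interrupt remap tables */
                amd_iommu_flush_tlb_all(iommu);  /* per-domain IO/TLBs */
        }
}
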
1245 struct amd_iommu *iommu; in device_flush_iotlb() local
1250 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1254 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
1259 struct amd_iommu *iommu = data; in device_flush_dte_alias() local
1261 return iommu_flush_dte(iommu, alias); in device_flush_dte_alias()
1269 struct amd_iommu *iommu; in device_flush_dte() local
1273 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1277 device_flush_dte_alias, iommu); in device_flush_dte()
1279 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1285 ret = iommu_flush_dte(iommu, alias); in device_flush_dte()
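The callback passed at 1277 is handed to the PCI core's pci_for_each_dma_alias(), so the device-table flush reaches every requester ID the device can emit; devices behind PCIe-to-PCI bridges, for example, may DMA with the bridge's RID, and flushing only dev_data->devid would leave a stale cached DTE for the alias. Kernel-style sketch; the surrounding branch structure is filled in from convention:

/* Invoked once per DMA alias RID of the device. */
static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
{
        struct amd_iommu *iommu = data;

        return iommu_flush_dte(iommu, alias);
}

/* In device_flush_dte(), roughly:
 *
 *      if (pdev)
 *              ret = pci_for_each_dma_alias(pdev,
 *                                           device_flush_dte_alias, iommu);
 *      else
 *              ret = iommu_flush_dte(iommu, dev_data->devid);
 */
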
1920 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in set_dte_entry() local
1922 if (iommu_feature(iommu, FEATURE_EPHSUP)) in set_dte_entry()
1965 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in set_dte_entry() local
1967 amd_iommu_flush_tlb_domid(iommu, old_domid); in set_dte_entry()
1984 struct amd_iommu *iommu; in do_attach() local
1987 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1995 domain->dev_iommu[iommu->index] += 1; in do_attach()
2010 struct amd_iommu *iommu; in do_detach() local
2012 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
2030 domain->dev_iommu[iommu->index] -= 1; in do_detach()
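The += 1 / -= 1 at 1995 and 2030 maintain a per-IOMMU attach count inside the protection domain; in the driver, domain-wide flush helpers use it to skip IOMMUs that cannot be caching the domain. A minimal model; MAX_IOMMUS is a stand-in bound:

#include <stdbool.h>

#define MAX_IOMMUS 32   /* stand-in bound */

/* Per-domain, per-IOMMU attach counts, as touched by do_attach()
 * and do_detach() above. */
struct domain_model {
        unsigned int dev_iommu[MAX_IOMMUS];
};

/* Flush paths only target hardware with a non-zero count. */
static bool domain_needs_flush_on(const struct domain_model *d, unsigned int idx)
{
        return d->dev_iommu[idx] != 0;
}
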
2218 struct amd_iommu *iommu; in amd_iommu_probe_device() local
2228 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_probe_device()
2231 return &iommu->iommu; in amd_iommu_probe_device()
2240 amd_iommu_set_pci_msi_domain(dev, iommu); in amd_iommu_probe_device()
2241 iommu_dev = &iommu->iommu; in amd_iommu_probe_device()
2244 iommu_completion_wait(iommu); in amd_iommu_probe_device()
2262 struct amd_iommu *iommu; in amd_iommu_release_device() local
2267 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_release_device()
2270 iommu_completion_wait(iommu); in amd_iommu_release_device()
2516 struct amd_iommu *iommu; in amd_iommu_detach_device() local
2529 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_detach_device()
2530 if (!iommu) in amd_iommu_detach_device()
2539 iommu_completion_wait(iommu); in amd_iommu_detach_device()
2547 struct amd_iommu *iommu; in amd_iommu_attach_device() local
2556 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
2557 if (!iommu) in amd_iommu_attach_device()
2574 iommu_completion_wait(iommu); in amd_iommu_attach_device()
2900 struct amd_iommu *iommu; in __flush_pasid() local
2911 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
2916 ret = iommu_queue_command(iommu, &cmd); in __flush_pasid()
3074 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
3078 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3083 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()
3239 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid, in set_remap_table_entry() argument
3244 iommu_flush_dte(iommu, devid); in set_remap_table_entry()
3264 struct amd_iommu *iommu; in alloc_irq_table() local
3270 iommu = amd_iommu_rlookup_table[devid]; in alloc_irq_table()
3271 if (!iommu) in alloc_irq_table()
3281 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3299 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3310 set_remap_table_entry(iommu, devid, table); in alloc_irq_table()
3313 set_remap_table_entry(iommu, alias, table); in alloc_irq_table()
3316 iommu_completion_wait(iommu); in alloc_irq_table()
3334 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in alloc_irq_index() local
3336 if (!iommu) in alloc_irq_index()
3351 if (!iommu->irte_ops->is_allocated(table, index)) { in alloc_irq_index()
3361 iommu->irte_ops->set_allocated(table, index - c + 1); in alloc_irq_index()
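alloc_irq_index() (3334-3361) is a first-fit scan for `count` consecutive free slots in the device's interrupt-remapping table; when a long-enough free run ends at `index`, the loop at 3361 walks back and marks each slot allocated. A standalone model on a plain bitmap; the table size is a stand-in, and the real scan also honors an alignment constraint for multi-MSI:

#include <stdbool.h>

#define TABLE_SIZE 256   /* stand-in for the table's slot count */

static int alloc_irq_index_model(bool allocated[TABLE_SIZE], int count)
{
        int index, c;

        for (index = 0, c = 0; index < TABLE_SIZE; index++) {
                if (allocated[index]) {
                        c = 0;                   /* free run broken */
                        continue;
                }
                c += 1;                          /* extend the free run */

                if (c == count) {                /* run found: claim it */
                        for (; c != 0; --c)
                                allocated[index - c + 1] = true;
                        return index - count + 1;
                }
        }
        return -1;                               /* the driver returns -ENOSPC */
}
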
3383 struct amd_iommu *iommu; in modify_irte_ga() local
3387 iommu = amd_iommu_rlookup_table[devid]; in modify_irte_ga()
3388 if (iommu == NULL) in modify_irte_ga()
3416 iommu_flush_irt(iommu, devid); in modify_irte_ga()
3417 iommu_completion_wait(iommu); in modify_irte_ga()
3425 struct amd_iommu *iommu; in modify_irte() local
3428 iommu = amd_iommu_rlookup_table[devid]; in modify_irte()
3429 if (iommu == NULL) in modify_irte()
3440 iommu_flush_irt(iommu, devid); in modify_irte()
3441 iommu_completion_wait(iommu); in modify_irte()
3449 struct amd_iommu *iommu; in free_irte() local
3452 iommu = amd_iommu_rlookup_table[devid]; in free_irte()
3453 if (iommu == NULL) in free_irte()
3461 iommu->irte_ops->clear_allocated(table, index); in free_irte()
3464 iommu_flush_irt(iommu, devid); in free_irte()
3465 iommu_completion_wait(iommu); in free_irte()
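modify_irte(), modify_irte_ga() and free_irte() share one pattern: look up the IOMMU, update the entry in the remap table under its lock, then invalidate the IOMMU's cached copy and fence on a completion-wait. A reconstruction of modify_irte() around the matched lines; the table lookup, locking and error values are filled in from the driver's conventions and may differ in detail:

static int modify_irte(u16 devid, int index, union irte *irte)
{
        struct irq_remap_table *table;
        struct amd_iommu *iommu;
        unsigned long flags;

        iommu = amd_iommu_rlookup_table[devid];
        if (iommu == NULL)
                return -EINVAL;

        table = get_irq_table(devid);
        if (!table)
                return -ENOMEM;

        raw_spin_lock_irqsave(&table->lock, flags);
        table->table[index] = irte->val;        /* publish the new entry */
        raw_spin_unlock_irqrestore(&table->lock, flags);

        /* the IOMMU caches IRTEs: invalidate, then wait for completion */
        iommu_flush_irt(iommu, devid);
        iommu_completion_wait(iommu);

        return 0;
}
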
3622 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in get_irq_domain_for_devid() local
3624 if (!iommu) in get_irq_domain_for_devid()
3630 return iommu->ir_domain; in get_irq_domain_for_devid()
3667 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; in irq_remapping_prepare_irte() local
3669 if (!iommu) in irq_remapping_prepare_irte()
3674 iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode, in irq_remapping_prepare_irte()
3760 struct amd_iommu *iommu; in irq_remapping_alloc() local
3770 iommu = amd_iommu_rlookup_table[devid]; in irq_remapping_alloc()
3772 iommu->irte_ops->set_allocated(table, i); in irq_remapping_alloc()
3861 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3871 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_activate() local
3874 if (!iommu) in irq_remapping_activate()
3877 iommu->irte_ops->activate(data->entry, irte_info->devid, in irq_remapping_activate()
3879 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); in irq_remapping_activate()
3888 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in irq_remapping_deactivate() local
3890 if (iommu) in irq_remapping_deactivate()
3891 iommu->irte_ops->deactivate(data->entry, irte_info->devid, in irq_remapping_deactivate()
3962 struct amd_iommu *iommu; in amd_ir_set_vcpu_affinity() local
3989 iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_vcpu_affinity()
3990 if (iommu == NULL) in amd_ir_set_vcpu_affinity()
4016 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, in amd_ir_update_irte() argument
4026 iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid, in amd_ir_update_irte()
4038 struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; in amd_ir_set_affinity() local
4041 if (!iommu) in amd_ir_set_affinity()
4048 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); in amd_ir_set_affinity()
4074 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
4078 fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); in amd_iommu_create_irq_domain()
4081 iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
4082 if (!iommu->ir_domain) { in amd_iommu_create_irq_domain()
4087 iommu->ir_domain->parent = arch_get_ir_parent_domain(); in amd_iommu_create_irq_domain()
4088 iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, in amd_iommu_create_irq_domain()
4090 iommu->index); in amd_iommu_create_irq_domain()
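amd_iommu_create_irq_domain() builds the remapping hierarchy: a named fwnode, a radix-tree irq_domain holding the IRTE state, parented below the arch remapping domain, with an MSI domain stacked on top so per-device MSIs route through the remapper. Reconstructed sketch; the "AMD-IR-MSI" name and the error paths are filled in from convention and may not match the source exactly:

int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
        struct fwnode_handle *fn;

        fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
        if (!fn)
                return -ENOMEM;

        iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
        if (!iommu->ir_domain) {
                irq_domain_free_fwnode(fn);
                return -ENOMEM;
        }

        /* hook below the arch remapping parent, then stack an MSI
         * domain on top of the remapping domain */
        iommu->ir_domain->parent = arch_get_ir_parent_domain();
        iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
                                                             "AMD-IR-MSI",
                                                             iommu->index);
        return 0;
}
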
4097 struct amd_iommu *iommu; in amd_iommu_update_ga() local
4108 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_update_ga()
4109 if (!iommu) in amd_iommu_update_ga()
4131 iommu_flush_irt(iommu, devid); in amd_iommu_update_ga()
4132 iommu_completion_wait(iommu); in amd_iommu_update_ga()