Lines Matching refs:dev_data (identifier cross-reference; all hits are in the AMD IOMMU driver, drivers/iommu/amd/iommu.c)

187 struct iommu_dev_data *dev_data; in alloc_dev_data() local
190 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); in alloc_dev_data()
191 if (!dev_data) in alloc_dev_data()
194 spin_lock_init(&dev_data->lock); in alloc_dev_data()
195 dev_data->devid = devid; in alloc_dev_data()
196 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
198 llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list); in alloc_dev_data()
199 return dev_data; in alloc_dev_data()
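
Taken together, the matches in alloc_dev_data() cover almost the entire constructor. A minimal sketch reconstructed from these fragments; the signature follows the call site at line 289, and the pci_seg derivation and the error return are assumptions, not visible in the matches:

    static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
    {
        struct iommu_dev_data *dev_data;
        struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;    /* assumed derivation */

        dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);     /* zeroed, so all flags start clear */
        if (!dev_data)
            return NULL;                                       /* assumed error path (line 191) */

        spin_lock_init(&dev_data->lock);
        dev_data->devid = devid;
        ratelimit_default_init(&dev_data->rs);                 /* rate-limit state used by the fault reporters */

        /* lock-free publication on the per-PCI-segment list */
        llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);

        return dev_data;
    }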
204 struct iommu_dev_data *dev_data; in search_dev_data() local
212 llist_for_each_entry(dev_data, node, dev_data_list) { in search_dev_data()
213 if (dev_data->devid == devid) in search_dev_data()
214 return dev_data; in search_dev_data()
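
search_dev_data() is the read side of the same llist. The empty-list check and head fetch fall in the elided lines between 204 and 212, so they are assumptions here:

    static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
    {
        struct iommu_dev_data *dev_data;
        struct llist_node *node;
        struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;    /* assumed derivation */

        if (llist_empty(&pci_seg->dev_data_list))              /* assumed early-out */
            return NULL;

        node = pci_seg->dev_data_list.first;
        llist_for_each_entry(dev_data, node, dev_data_list) {  /* lock-free traversal */
            if (dev_data->devid == devid)
                return dev_data;
        }

        return NULL;
    }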
284 struct iommu_dev_data *dev_data; in find_dev_data() local
286 dev_data = search_dev_data(iommu, devid); in find_dev_data()
288 if (dev_data == NULL) { in find_dev_data()
289 dev_data = alloc_dev_data(iommu, devid); in find_dev_data()
290 if (!dev_data) in find_dev_data()
294 dev_data->defer_attach = true; in find_dev_data()
297 return dev_data; in find_dev_data()
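
find_dev_data() composes the two into a find-or-allocate helper. The guard around line 294 is not visible in the matches; the translation_pre_enabled() condition below (deferring attach during a kdump handover) is an assumption based on mainline:

    static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
    {
        struct iommu_dev_data *dev_data;

        dev_data = search_dev_data(iommu, devid);

        if (dev_data == NULL) {
            dev_data = alloc_dev_data(iommu, devid);
            if (!dev_data)
                return NULL;

            /* assumed guard: the previous kernel left translation enabled */
            if (translation_pre_enabled(iommu))
                dev_data->defer_attach = true;
        }

        return dev_data;
    }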
377 struct iommu_dev_data *dev_data; in iommu_init_device() local
388 dev_data = find_dev_data(iommu, devid); in iommu_init_device()
389 if (!dev_data) in iommu_init_device()
392 dev_data->dev = dev; in iommu_init_device()
403 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
406 dev_iommu_priv_set(dev, dev_data); in iommu_init_device()
430 struct iommu_dev_data *dev_data; in amd_iommu_uninit_device() local
432 dev_data = dev_iommu_priv_get(dev); in amd_iommu_uninit_device()
433 if (!dev_data) in amd_iommu_uninit_device()
436 if (dev_data->domain) in amd_iommu_uninit_device()
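
The uninit path only detaches; the dev_data itself stays on the per-segment list so it can be reused if the device is re-plugged (mainline behavior). A sketch, with the early return and the detach_device() call assumed:

    static void amd_iommu_uninit_device(struct device *dev)
    {
        struct iommu_dev_data *dev_data;

        dev_data = dev_iommu_priv_get(dev);
        if (!dev_data)
            return;                     /* assumed */

        if (dev_data->domain)
            detach_device(dev);         /* assumed: still-attached devices get detached here */

        /* dev_data is deliberately not freed; it remains on
         * pci_seg->dev_data_list for the lifetime of the system */
    }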
473 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_hw_error() local
486 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
488 if (dev_data) { in amd_iommu_report_rmp_hw_error()
489 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
505 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_fault() local
519 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
521 if (dev_data) { in amd_iommu_report_rmp_fault()
522 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
546 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_page_fault() local
552 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
554 if (dev_data) { in amd_iommu_report_page_fault()
562 if (dev_data->domain == NULL) { in amd_iommu_report_page_fault()
570 if (!report_iommu_fault(&dev_data->domain->domain, in amd_iommu_report_page_fault()
578 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
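
All three event-log reporters (RMP hardware error, RMP fault, page fault) share one pattern: resolve dev_data from the PCI device and gate output on the per-device ratelimit state initialized at line 196. Schematically (message text and the fallback branch are placeholders, not the in-tree strings):

    dev_data = dev_iommu_priv_get(&pdev->dev);
    if (dev_data) {
        if (__ratelimit(&dev_data->rs)) {
            dev_err(&pdev->dev, "...");     /* per-device, rate-limited report */
        }
    } else {
        /* assumed fallback: report against the IOMMU itself */
    }

The page-fault path (lines 562 and 570) additionally handles the no-domain case specially and first forwards the fault through report_iommu_fault(), giving a registered fault handler the chance to consume it before anything is printed.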
1313 static int device_flush_iotlb(struct iommu_dev_data *dev_data, in device_flush_iotlb() argument
1320 qdep = dev_data->ats.qdep; in device_flush_iotlb()
1321 iommu = rlookup_amd_iommu(dev_data->dev); in device_flush_iotlb()
1325 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
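
device_flush_iotlb() invalidates a device's own ATS translation cache, as opposed to the IOMMU's TLB. The error handling and the final queueing call are assumptions:

    static int device_flush_iotlb(struct iommu_dev_data *dev_data,
                                  u64 address, size_t size)
    {
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;
        int qdep;

        qdep  = dev_data->ats.qdep;                 /* invalidation queue depth, saved at attach time */
        iommu = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
            return -EINVAL;                         /* assumed */

        build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

        return iommu_queue_command(iommu, &cmd);    /* assumed */
    }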
1340 static int device_flush_dte(struct iommu_dev_data *dev_data) in device_flush_dte() argument
1348 iommu = rlookup_amd_iommu(dev_data->dev); in device_flush_dte()
1352 if (dev_is_pci(dev_data->dev)) in device_flush_dte()
1353 pdev = to_pci_dev(dev_data->dev); in device_flush_dte()
1359 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1364 alias = pci_seg->alias_table[dev_data->devid]; in device_flush_dte()
1365 if (alias != dev_data->devid) { in device_flush_dte()
1371 if (dev_data->ats.enabled) in device_flush_dte()
1372 ret = device_flush_iotlb(dev_data, 0, ~0UL); in device_flush_dte()
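
device_flush_dte() shows why the alias table matters: a DTE flush must be repeated for the device's PCI alias, and an ATS-enabled device additionally gets a full-range IOTLB flush. Sketch, with error paths and the pci_seg derivation assumed:

    static int device_flush_dte(struct iommu_dev_data *dev_data)
    {
        struct amd_iommu *iommu;
        struct pci_dev *pdev = NULL;
        u16 alias;
        int ret;

        iommu = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
            return -EINVAL;                                 /* assumed */

        if (dev_is_pci(dev_data->dev))
            pdev = to_pci_dev(dev_data->dev);
        /* elided (lines 1354-1358): quirky multi-alias devices
         * flush one DTE per alias instead */

        ret = iommu_flush_dte(iommu, dev_data->devid);
        if (ret)
            return ret;                                     /* assumed */

        alias = iommu->pci_seg->alias_table[dev_data->devid];   /* assumed derivation */
        if (alias != dev_data->devid)
            ret = iommu_flush_dte(iommu, alias);            /* assumed: flush the alias DTE too */

        if (dev_data->ats.enabled)
            ret = device_flush_iotlb(dev_data, 0, ~0UL);    /* full-range ATS flush */

        return ret;
    }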
1385 struct iommu_dev_data *dev_data; in __domain_flush_pages() local
1402 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1404 if (!dev_data->ats.enabled) in __domain_flush_pages()
1407 ret |= device_flush_iotlb(dev_data, address, size); in __domain_flush_pages()
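
The device loop at the end of __domain_flush_pages() exists because the IOMMU-side invalidation does not reach device-local ATS caches; every ATS-enabled device on the domain needs its own flush:

    /* tail of __domain_flush_pages(), lines 1402-1407 */
    list_for_each_entry(dev_data, &domain->dev_list, list) {
        if (!dev_data->ats.enabled)
            continue;
        ret |= device_flush_iotlb(dev_data, address, size);
    }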
1498 struct iommu_dev_data *dev_data; in domain_flush_devices() local
1500 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1501 device_flush_dte(dev_data); in domain_flush_devices()
1679 static void do_attach(struct iommu_dev_data *dev_data, in do_attach() argument
1685 iommu = rlookup_amd_iommu(dev_data->dev); in do_attach()
1688 ats = dev_data->ats.enabled; in do_attach()
1691 dev_data->domain = domain; in do_attach()
1692 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1696 domain->nid = dev_to_node(dev_data->dev); in do_attach()
1703 set_dte_entry(iommu, dev_data->devid, domain, in do_attach()
1704 ats, dev_data->iommu_v2); in do_attach()
1705 clone_aliases(iommu, dev_data->dev); in do_attach()
1707 device_flush_dte(dev_data); in do_attach()
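
do_attach() is the core of attachment: link the device into the domain, pick a NUMA node for page-table allocations on first use, write the DTE (plus its alias copies), then flush it. The NUMA guard and the reference-counting lines (1697-1702) are assumptions:

    static void do_attach(struct iommu_dev_data *dev_data,
                          struct protection_domain *domain)
    {
        struct amd_iommu *iommu;
        bool ats;

        iommu = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
            return;                             /* assumed */
        ats = dev_data->ats.enabled;

        dev_data->domain = domain;
        list_add(&dev_data->list, &domain->dev_list);

        if (domain->nid == NUMA_NO_NODE)        /* assumed guard */
            domain->nid = dev_to_node(dev_data->dev);

        /* elided: per-IOMMU and per-domain device counters */

        set_dte_entry(iommu, dev_data->devid, domain,
                      ats, dev_data->iommu_v2);
        clone_aliases(iommu, dev_data->dev);    /* propagate the DTE to alias entries */

        device_flush_dte(dev_data);
    }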
1710 static void do_detach(struct iommu_dev_data *dev_data) in do_detach() argument
1712 struct protection_domain *domain = dev_data->domain; in do_detach()
1715 iommu = rlookup_amd_iommu(dev_data->dev); in do_detach()
1720 dev_data->domain = NULL; in do_detach()
1721 list_del(&dev_data->list); in do_detach()
1722 clear_dte_entry(iommu, dev_data->devid); in do_detach()
1723 clone_aliases(iommu, dev_data->dev); in do_detach()
1726 device_flush_dte(dev_data); in do_detach()
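
do_detach() undoes the same steps. Note the ordering: the DTE is cleared and flushed before domain state is torn down, so the device can no longer walk the domain's page tables. The trailing flush and counter updates are assumptions:

    static void do_detach(struct iommu_dev_data *dev_data)
    {
        struct protection_domain *domain = dev_data->domain;
        struct amd_iommu *iommu;

        iommu = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
            return;                             /* assumed */

        dev_data->domain = NULL;
        list_del(&dev_data->list);
        clear_dte_entry(iommu, dev_data->devid);
        clone_aliases(iommu, dev_data->dev);

        device_flush_dte(dev_data);

        /* assumed tail: flush the domain TLB, wait for completion,
         * then drop the per-IOMMU/per-domain device counters */
    }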
1788 struct iommu_dev_data *dev_data; in attach_device() local
1795 dev_data = dev_iommu_priv_get(dev); in attach_device()
1797 spin_lock(&dev_data->lock); in attach_device()
1800 if (dev_data->domain != NULL) in attach_device()
1822 if (dev_data->iommu_v2) { in attach_device()
1826 dev_data->ats.enabled = true; in attach_device()
1827 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
1828 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in attach_device()
1832 dev_data->ats.enabled = true; in attach_device()
1833 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
1839 do_attach(dev_data, domain); in attach_device()
1851 spin_unlock(&dev_data->lock); in attach_device()
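
Within attach_device() (under dev_data->lock), the capability setup at lines 1822-1833 decides what gets recorded in dev_data before do_attach() writes the DTE. The branch structure below is partly assumed, and pdev_pri_ats_enable() is the mainline helper name:

    if (dev_data->iommu_v2) {
        /* v2 (PASID-capable) path: enable PRI + PASID + ATS together */
        if (pdev_pri_ats_enable(pdev) != 0)     /* assumed error handling */
            goto out;
        dev_data->ats.enabled = true;
        dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
        dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
    } else if (amd_iommu_iotlb_sup &&
               pci_enable_ats(pdev, PAGE_SHIFT) == 0) {    /* assumed condition */
        /* plain ATS path */
        dev_data->ats.enabled = true;
        dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
    }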
1864 struct iommu_dev_data *dev_data; in detach_device() local
1867 dev_data = dev_iommu_priv_get(dev); in detach_device()
1868 domain = dev_data->domain; in detach_device()
1872 spin_lock(&dev_data->lock); in detach_device()
1880 if (WARN_ON(!dev_data->domain)) in detach_device()
1883 do_detach(dev_data); in detach_device()
1888 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
1890 else if (dev_data->ats.enabled) in detach_device()
1893 dev_data->ats.enabled = false; in detach_device()
1896 spin_unlock(&dev_data->lock); in detach_device()
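
detach_device() disables in reverse what attach_device() enabled; the WARN_ON at line 1880 guards against a double-detach race. The helper name below is an assumption based on mainline:

    /* tail of detach_device(), lines 1888-1893 */
    if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
        pdev_iommuv2_disable(to_pci_dev(dev));  /* assumed helper: disables PRI/PASID/ATS */
    else if (dev_data->ats.enabled)
        pci_disable_ats(to_pci_dev(dev));

    dev_data->ats.enabled = false;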
1975 struct iommu_dev_data *dev_data; in update_device_table() local
1977 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
1978 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); in update_device_table()
1982 set_dte_entry(iommu, dev_data->devid, domain, in update_device_table()
1983 dev_data->ats.enabled, dev_data->iommu_v2); in update_device_table()
1984 clone_aliases(iommu, dev_data->dev); in update_device_table()
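
update_device_table() rewrites the DTE of every device on a domain, for instance after the page-table configuration changes; the matches cover essentially the whole body (only the NULL check is assumed):

    static void update_device_table(struct protection_domain *domain)
    {
        struct iommu_dev_data *dev_data;

        list_for_each_entry(dev_data, &domain->dev_list, list) {
            struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);

            if (!iommu)
                continue;                       /* assumed */
            set_dte_entry(iommu, dev_data->devid, domain,
                          dev_data->ats.enabled, dev_data->iommu_v2);
            clone_aliases(iommu, dev_data->dev);
        }
    }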
2203 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_attach_device() local
2212 if (dev_data->domain == domain) in amd_iommu_attach_device()
2215 dev_data->defer_attach = false; in amd_iommu_attach_device()
2217 if (dev_data->domain) in amd_iommu_attach_device()
2225 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2227 dev_data->use_vapic = 0; in amd_iommu_attach_device()
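
The use_vapic flag set here is what amd_ir_set_vcpu_affinity() checks at line 3621. The domain-type condition and the CONFIG_IRQ_REMAP guard are assumptions based on mainline:

    /* tail of amd_iommu_attach_device(), lines 2225-2227 */
    #ifdef CONFIG_IRQ_REMAP
    if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {     /* assumed guard */
        if (dom->type == IOMMU_DOMAIN_UNMANAGED)            /* assumed condition */
            dev_data->use_vapic = 1;    /* VFIO-style domain: guest vAPIC allowed */
        else
            dev_data->use_vapic = 0;
    }
    #endif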
2405 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_is_attach_deferred() local
2407 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
2436 struct iommu_dev_data *dev_data; in amd_iommu_def_domain_type() local
2438 dev_data = dev_iommu_priv_get(dev); in amd_iommu_def_domain_type()
2439 if (!dev_data) in amd_iommu_def_domain_type()
2449 if (dev_data->iommu_v2 && in amd_iommu_def_domain_type()
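
amd_iommu_def_domain_type() uses dev_data->iommu_v2 to prefer an identity default domain for v2-capable devices, which keeps the PASID/SVA path usable. The continuation of the condition at line 2449 is elided in the match; the cc_platform_has() clause below is an assumption based on mainline:

    /* tail of amd_iommu_def_domain_type(), line 2449 ff. */
    if (dev_data->iommu_v2 &&
        !cc_platform_has(CC_ATTR_MEM_ENCRYPT))  /* assumed remainder of the condition */
        return IOMMU_DOMAIN_IDENTITY;

    return 0;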
2578 struct iommu_dev_data *dev_data; in __flush_pasid() local
2604 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
2612 if (!dev_data->ats.enabled) in __flush_pasid()
2615 qdep = dev_data->ats.qdep; in __flush_pasid()
2616 iommu = rlookup_amd_iommu(dev_data->dev); in __flush_pasid()
2619 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
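
__flush_pasid() repeats the pattern of __domain_flush_pages() for PASID-tagged mappings: after the IOMMU-side invalidation, each ATS-enabled device gets a PASID-qualified IOTLB flush. The queueing call and error handling are assumptions:

    /* device loop in __flush_pasid(), lines 2604-2619 */
    list_for_each_entry(dev_data, &domain->dev_list, list) {
        struct iommu_cmd cmd;
        int qdep;

        if (!dev_data->ats.enabled)
            continue;

        qdep  = dev_data->ats.qdep;
        iommu = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
            continue;                           /* assumed */

        build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
                              qdep, address, size);     /* trailing arguments assumed */
        ret |= iommu_queue_command(iommu, &cmd);        /* assumed */
    }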
2775 struct iommu_dev_data *dev_data; in amd_iommu_complete_ppr() local
2779 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_complete_ppr()
2784 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
2785 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
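
amd_iommu_complete_ppr() closes the loop on the PRI setup: the pri_tlp value captured at attach time (line 1828) is replayed into every PPR completion command. Sketch, with the lookup and queueing assumed:

    int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
                               int status, int tag)
    {
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;

        dev_data = dev_iommu_priv_get(&pdev->dev);
        iommu    = rlookup_amd_iommu(&pdev->dev);   /* assumed */
        if (!iommu)
            return -ENODEV;                         /* assumed */

        build_complete_ppr(&cmd, dev_data->devid, pasid, status,
                           tag, dev_data->pri_tlp);

        return iommu_queue_command(iommu, &cmd);    /* assumed */
    }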
3610 struct iommu_dev_data *dev_data; in amd_ir_set_vcpu_affinity() local
3615 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3621 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()
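
Finally, the interrupt-remapping side consumes the flag set in amd_iommu_attach_device(): without use_vapic, amd_ir_set_vcpu_affinity() bails out and the IRTE stays in legacy (non-posted) mode. The early return value is an assumption:

    dev_data = search_dev_data(ir_data->iommu, irte_info->devid);

    /* only devices attached to an unmanaged (VFIO) domain may use
     * guest posted interrupts; everyone else keeps remapped mode */
    if (!dev_data || !dev_data->use_vapic)
        return 0;                                   /* assumed */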