Lines matching refs: dev_data

206 struct iommu_dev_data *dev_data; in alloc_dev_data() local
208 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); in alloc_dev_data()
209 if (!dev_data) in alloc_dev_data()
212 dev_data->devid = devid; in alloc_dev_data()
213 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
215 llist_add(&dev_data->dev_data_list, &dev_data_list); in alloc_dev_data()
216 return dev_data; in alloc_dev_data()
221 struct iommu_dev_data *dev_data; in search_dev_data() local
228 llist_for_each_entry(dev_data, node, dev_data_list) { in search_dev_data()
229 if (dev_data->devid == devid) in search_dev_data()
230 return dev_data; in search_dev_data()
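
Taken together, the alloc_dev_data() and search_dev_data() fragments above describe a small per-device cache kept on a lock-free llist. A minimal sketch of the two helpers, reconstructed from the listed lines; the unlisted pieces (the llist_empty() early return and reading dev_data_list.first) are assumptions based on the usual pattern:

static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;

        dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
        if (!dev_data)
                return NULL;

        dev_data->devid = devid;
        ratelimit_default_init(&dev_data->rs);

        /* Publish on the global lock-free list of known devices */
        llist_add(&dev_data->dev_data_list, &dev_data_list);

        return dev_data;
}

static struct iommu_dev_data *search_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;
        struct llist_node *node;

        if (llist_empty(&dev_data_list))
                return NULL;

        node = dev_data_list.first;
        llist_for_each_entry(dev_data, node, dev_data_list) {
                if (dev_data->devid == devid)
                        return dev_data;
        }

        return NULL;
}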
306 struct iommu_dev_data *dev_data; in find_dev_data() local
309 dev_data = search_dev_data(devid); in find_dev_data()
311 if (dev_data == NULL) { in find_dev_data()
312 dev_data = alloc_dev_data(devid); in find_dev_data()
313 if (!dev_data) in find_dev_data()
317 dev_data->defer_attach = true; in find_dev_data()
320 return dev_data; in find_dev_data()
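
find_dev_data() is the find-or-allocate wrapper over the two helpers; the defer_attach flag set at line 317 marks devices whose IOMMU came up with translation already enabled, so the real attach is deferred until first use. A sketch of the function; the translation_pre_enabled() check is an assumption inferred from that flag:

static struct iommu_dev_data *find_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        dev_data = search_dev_data(devid);
        if (dev_data == NULL) {
                dev_data = alloc_dev_data(devid);
                if (!dev_data)
                        return NULL;

                /* Translation was left enabled by firmware or a previous
                 * kernel: postpone the real attach until first use. */
                if (translation_pre_enabled(iommu))
                        dev_data->defer_attach = true;
        }

        return dev_data;
}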
377 struct iommu_dev_data *dev_data; in pdev_pri_erratum() local
379 dev_data = get_dev_data(&pdev->dev); in pdev_pri_erratum()
381 return dev_data->errata & (1 << erratum) ? true : false; in pdev_pri_erratum()
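
pdev_pri_erratum() and its counterpart amd_iommu_enable_device_erratum() (lines 3527-3533 below) treat dev_data->errata as a plain bitmask keyed by erratum number. A brief sketch of the pair, using only names that appear in the listing:

void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
{
        struct iommu_dev_data *dev_data = get_dev_data(&pdev->dev);

        dev_data->errata |= (1 << erratum);     /* record the quirk */
}

static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
        struct iommu_dev_data *dev_data = get_dev_data(&pdev->dev);

        return dev_data->errata & (1 << erratum) ? true : false;
}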
422 struct iommu_dev_data *dev_data; in iommu_init_device() local
435 dev_data = find_dev_data(devid); in iommu_init_device()
436 if (!dev_data) in iommu_init_device()
439 dev_data->alias = get_alias(dev); in iommu_init_device()
444 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
445 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
448 dev->archdata.iommu = dev_data; in iommu_init_device()
475 struct iommu_dev_data *dev_data; in iommu_uninit_device() local
485 dev_data = search_dev_data(devid); in iommu_uninit_device()
486 if (!dev_data) in iommu_uninit_device()
489 if (dev_data->domain) in iommu_uninit_device()
532 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_page_fault() local
538 dev_data = get_dev_data(&pdev->dev); in amd_iommu_report_page_fault()
540 if (dev_data && __ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
1192 static int device_flush_iotlb(struct iommu_dev_data *dev_data, in device_flush_iotlb() argument
1199 qdep = dev_data->ats.qdep; in device_flush_iotlb()
1200 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1202 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
1210 static int device_flush_dte(struct iommu_dev_data *dev_data) in device_flush_dte() argument
1216 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1217 alias = dev_data->alias; in device_flush_dte()
1219 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1220 if (!ret && alias != dev_data->devid) in device_flush_dte()
1225 if (dev_data->ats.enabled) in device_flush_dte()
1226 ret = device_flush_iotlb(dev_data, 0, ~0UL); in device_flush_dte()
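
device_flush_iotlb() and device_flush_dte() show the per-device invalidation path: the DTE is flushed for the device and, if different, its alias, and when ATS is enabled the device-side IOTLB is flushed as well (0 / ~0UL covering the whole address range). A sketch of both; iommu_queue_command() and the intermediate error handling are assumptions:

static int device_flush_iotlb(struct iommu_dev_data *dev_data,
                              u64 address, size_t size)
{
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;
        int qdep;

        qdep  = dev_data->ats.qdep;
        iommu = amd_iommu_rlookup_table[dev_data->devid];

        build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

        return iommu_queue_command(iommu, &cmd);
}

static int device_flush_dte(struct iommu_dev_data *dev_data)
{
        struct amd_iommu *iommu;
        u16 alias;
        int ret;

        iommu = amd_iommu_rlookup_table[dev_data->devid];
        alias = dev_data->alias;

        ret = iommu_flush_dte(iommu, dev_data->devid);
        if (!ret && alias != dev_data->devid)
                ret = iommu_flush_dte(iommu, alias);
        if (ret)
                return ret;

        /* With ATS the device caches translations itself, too */
        if (dev_data->ats.enabled)
                ret = device_flush_iotlb(dev_data, 0, ~0UL);

        return ret;
}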
1239 struct iommu_dev_data *dev_data; in __domain_flush_pages() local
1256 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1258 if (!dev_data->ats.enabled) in __domain_flush_pages()
1261 ret |= device_flush_iotlb(dev_data, address, size); in __domain_flush_pages()
1307 struct iommu_dev_data *dev_data; in domain_flush_devices() local
1309 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1310 device_flush_dte(dev_data); in domain_flush_devices()
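
At the domain level the same helpers are applied to every entry on domain->dev_list: __domain_flush_pages() only touches devices with ATS enabled, while domain_flush_devices() flushes every DTE. A sketch of the loop body and the small helper; the surrounding INVALIDATE_IOMMU_PAGES command issued to the IOMMUs themselves is omitted here:

        /* Inside __domain_flush_pages(): invalidate the IOTLB of every
         * ATS-capable device attached to the domain. */
        list_for_each_entry(dev_data, &domain->dev_list, list) {
                if (!dev_data->ats.enabled)
                        continue;

                ret |= device_flush_iotlb(dev_data, address, size);
        }

static void domain_flush_devices(struct protection_domain *domain)
{
        struct iommu_dev_data *dev_data;

        list_for_each_entry(dev_data, &domain->dev_list, list)
                device_flush_dte(dev_data);
}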
1896 static void do_attach(struct iommu_dev_data *dev_data, in do_attach() argument
1903 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1904 alias = dev_data->alias; in do_attach()
1905 ats = dev_data->ats.enabled; in do_attach()
1908 dev_data->domain = domain; in do_attach()
1909 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1916 set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2); in do_attach()
1917 if (alias != dev_data->devid) in do_attach()
1918 set_dte_entry(alias, domain, ats, dev_data->iommu_v2); in do_attach()
1920 device_flush_dte(dev_data); in do_attach()
1923 static void do_detach(struct iommu_dev_data *dev_data) in do_detach() argument
1928 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
1929 alias = dev_data->alias; in do_detach()
1932 dev_data->domain->dev_iommu[iommu->index] -= 1; in do_detach()
1933 dev_data->domain->dev_cnt -= 1; in do_detach()
1936 dev_data->domain = NULL; in do_detach()
1937 list_del(&dev_data->list); in do_detach()
1938 clear_dte_entry(dev_data->devid); in do_detach()
1939 if (alias != dev_data->devid) in do_detach()
1943 device_flush_dte(dev_data); in do_detach()
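
do_attach() and do_detach() are the symmetric bookkeeping helpers: link or unlink dev_data on the domain's dev_list, adjust the per-IOMMU and per-domain reference counts, rewrite the DTE for the device and its alias, then flush the DTE. A sketch combining the listed lines; the reference-count increments in do_attach() and the clear_dte_entry(alias) call are assumed to mirror the lines shown for do_detach():

static void do_attach(struct iommu_dev_data *dev_data,
                      struct protection_domain *domain)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[dev_data->devid];
        u16 alias = dev_data->alias;
        bool ats  = dev_data->ats.enabled;

        /* Link the device into the domain */
        dev_data->domain = domain;
        list_add(&dev_data->list, &domain->dev_list);

        /* Reference counting (assumed, mirrors do_detach) */
        domain->dev_iommu[iommu->index] += 1;
        domain->dev_cnt                 += 1;

        /* Update the device table for the device and its alias */
        set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
        if (alias != dev_data->devid)
                set_dte_entry(alias, domain, ats, dev_data->iommu_v2);

        device_flush_dte(dev_data);
}

static void do_detach(struct iommu_dev_data *dev_data)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[dev_data->devid];
        u16 alias = dev_data->alias;

        /* Drop the references taken in do_attach() */
        dev_data->domain->dev_iommu[iommu->index] -= 1;
        dev_data->domain->dev_cnt                 -= 1;

        /* Unlink and clear the device table entries */
        dev_data->domain = NULL;
        list_del(&dev_data->list);
        clear_dte_entry(dev_data->devid);
        if (alias != dev_data->devid)
                clear_dte_entry(alias);

        device_flush_dte(dev_data);
}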
1950 static int __attach_device(struct iommu_dev_data *dev_data, in __attach_device() argument
1959 if (dev_data->domain != NULL) in __attach_device()
1963 do_attach(dev_data, domain); in __attach_device()
2070 struct iommu_dev_data *dev_data; in attach_device() local
2074 dev_data = get_dev_data(dev); in attach_device()
2081 if (!dev_data->passthrough) in attach_device()
2084 if (dev_data->iommu_v2) { in attach_device()
2088 dev_data->ats.enabled = true; in attach_device()
2089 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2090 dev_data->pri_tlp = pci_pri_tlp_required(pdev); in attach_device()
2094 dev_data->ats.enabled = true; in attach_device()
2095 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2100 ret = __attach_device(dev_data, domain); in attach_device()
2116 static void __detach_device(struct iommu_dev_data *dev_data) in __detach_device() argument
2120 domain = dev_data->domain; in __detach_device()
2124 do_detach(dev_data); in __detach_device()
2135 struct iommu_dev_data *dev_data; in detach_device() local
2138 dev_data = get_dev_data(dev); in detach_device()
2139 domain = dev_data->domain; in detach_device()
2147 if (WARN_ON(!dev_data->domain)) in detach_device()
2152 __detach_device(dev_data); in detach_device()
2158 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
2160 else if (dev_data->ats.enabled) in detach_device()
2163 dev_data->ats.enabled = false; in detach_device()
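
attach_device() and detach_device() wrap the helpers above and also toggle the PCI-side features: for an IOMMUv2 domain the device additionally gets PRI/PASID state recorded (pci_pri_tlp_required(), pri_tlp), otherwise plain ATS is enabled when available, and detach_device() undoes whichever path was taken before clearing ats.enabled. A condensed sketch of the ATS/PRI branch in attach_device(); pdev_iommuv2_enable(), amd_iommu_iotlb_sup, pci_enable_ats() and PAGE_SHIFT are assumptions not present in the listing:

        if (domain->flags & PD_IOMMUV2_MASK) {
                if (!dev_data->passthrough)
                        return -EINVAL;

                if (dev_data->iommu_v2) {
                        if (pdev_iommuv2_enable(pdev) != 0)
                                return -EINVAL;

                        dev_data->ats.enabled = true;
                        dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
                        dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
                }
        } else if (amd_iommu_iotlb_sup &&
                   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
                dev_data->ats.enabled = true;
                dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
        }

        ret = __attach_device(dev_data, domain);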
2168 struct iommu_dev_data *dev_data; in amd_iommu_add_device() local
2194 dev_data = get_dev_data(dev); in amd_iommu_add_device()
2196 BUG_ON(!dev_data); in amd_iommu_add_device()
2198 if (iommu_pass_through || dev_data->iommu_v2) in amd_iommu_add_device()
2204 dev_data->passthrough = true; in amd_iommu_add_device()
2279 struct iommu_dev_data *dev_data; in update_device_table() local
2281 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
2282 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled, in update_device_table()
2283 dev_data->iommu_v2); in update_device_table()
2285 if (dev_data->devid == dev_data->alias) in update_device_table()
2289 set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled, in update_device_table()
2290 dev_data->iommu_v2); in update_device_table()
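
update_device_table() re-emits the DTE for every device in a domain (used when the domain's page-table configuration changes), again covering the PCI alias. A sketch built from the fragments above; the continue on matching alias is an assumption:

static void update_device_table(struct protection_domain *domain)
{
        struct iommu_dev_data *dev_data;

        list_for_each_entry(dev_data, &domain->dev_list, list) {
                set_dte_entry(dev_data->devid, domain,
                              dev_data->ats.enabled, dev_data->iommu_v2);

                if (dev_data->devid == dev_data->alias)
                        continue;

                /* The device has a PCI alias: update its entry as well */
                set_dte_entry(dev_data->alias, domain,
                              dev_data->ats.enabled, dev_data->iommu_v2);
        }
}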
2955 struct iommu_dev_data *dev_data = dev->archdata.iommu; in amd_iommu_detach_device() local
2966 if (dev_data->domain != NULL) in amd_iommu_detach_device()
2976 dev_data->use_vapic = 0; in amd_iommu_detach_device()
2986 struct iommu_dev_data *dev_data; in amd_iommu_attach_device() local
2993 dev_data = dev->archdata.iommu; in amd_iommu_attach_device()
2995 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
2999 if (dev_data->domain) in amd_iommu_attach_device()
3007 dev_data->use_vapic = 1; in amd_iommu_attach_device()
3009 dev_data->use_vapic = 0; in amd_iommu_attach_device()
3166 struct iommu_dev_data *dev_data = dev->archdata.iommu; in amd_iommu_is_attach_deferred() local
3167 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
3299 struct iommu_dev_data *dev_data; in __flush_pasid() local
3325 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
3333 if (!dev_data->ats.enabled) in __flush_pasid()
3336 qdep = dev_data->ats.qdep; in __flush_pasid()
3337 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
3339 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
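
__flush_pasid() walks the same per-device ATS loop as __domain_flush_pages(), but builds a PASID-qualified IOTLB invalidation for each ATS-enabled device. A sketch of that loop; iommu_queue_command() and the goto-based error handling are assumptions:

        list_for_each_entry(dev_data, &domain->dev_list, list) {
                struct amd_iommu *iommu;
                int qdep;

                /* Non-ATS devices have no device-side IOTLB to invalidate */
                if (!dev_data->ats.enabled)
                        continue;

                qdep  = dev_data->ats.qdep;
                iommu = amd_iommu_rlookup_table[dev_data->devid];

                build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
                                      qdep, address, size);

                ret = iommu_queue_command(iommu, &cmd);
                if (ret != 0)
                        goto out;
        }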
3495 struct iommu_dev_data *dev_data; in amd_iommu_complete_ppr() local
3499 dev_data = get_dev_data(&pdev->dev); in amd_iommu_complete_ppr()
3500 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3502 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
3503 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
3527 struct iommu_dev_data *dev_data; in amd_iommu_enable_device_erratum() local
3532 dev_data = get_dev_data(&pdev->dev); in amd_iommu_enable_device_erratum()
3533 dev_data->errata |= (1 << erratum); in amd_iommu_enable_device_erratum()
4303 struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid); in amd_ir_set_vcpu_affinity() local
4309 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()