Lines matching refs:dev_data (uses of struct iommu_dev_data in the AMD IOMMU driver)

188 	struct iommu_dev_data *dev_data;  in alloc_dev_data()  local
190 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); in alloc_dev_data()
191 if (!dev_data) in alloc_dev_data()
194 spin_lock_init(&dev_data->lock); in alloc_dev_data()
195 dev_data->devid = devid; in alloc_dev_data()
196 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
198 llist_add(&dev_data->dev_data_list, &dev_data_list); in alloc_dev_data()
199 return dev_data; in alloc_dev_data()
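The hits at 188-199 trace the allocation path: the per-device tracking structure is zero-allocated, its spinlock and ratelimit state are initialized, and the new entry is pushed onto the global lock-free llist that search_dev_data() walks. A minimal reconstruction from the visible lines (the lines the search did not match, such as the NULL return, are assumptions):

static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;	/* assumed: not part of the matched lines */

	spin_lock_init(&dev_data->lock);
	dev_data->devid = devid;
	ratelimit_default_init(&dev_data->rs);

	/* publish the entry on the global list; llist_add() is lock-free */
	llist_add(&dev_data->dev_data_list, &dev_data_list);

	return dev_data;
}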
204 struct iommu_dev_data *dev_data; in search_dev_data() local
211 llist_for_each_entry(dev_data, node, dev_data_list) { in search_dev_data()
212 if (dev_data->devid == devid) in search_dev_data()
213 return dev_data; in search_dev_data()
275 struct iommu_dev_data *dev_data; in find_dev_data() local
278 dev_data = search_dev_data(devid); in find_dev_data()
280 if (dev_data == NULL) { in find_dev_data()
281 dev_data = alloc_dev_data(devid); in find_dev_data()
282 if (!dev_data) in find_dev_data()
286 dev_data->defer_attach = true; in find_dev_data()
289 return dev_data; in find_dev_data()
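Lookup (lines 204-213) is a linear, lockless walk of that llist, and find_dev_data() (lines 275-289) falls back to allocation on a miss, optionally marking the new entry for deferred attach. A sketch of the pair, with the unmatched lines filled in from context (the llist_empty() check and the translation_pre_enabled() condition are assumptions about this vintage of the driver):

static struct iommu_dev_data *search_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct llist_node *node;

	if (llist_empty(&dev_data_list))
		return NULL;

	node = dev_data_list.first;
	llist_for_each_entry(dev_data, node, dev_data_list) {
		if (dev_data->devid == devid)
			return dev_data;
	}

	return NULL;
}

static struct iommu_dev_data *find_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	dev_data = search_dev_data(devid);

	if (dev_data == NULL) {
		dev_data = alloc_dev_data(devid);
		if (!dev_data)
			return NULL;

		/* if firmware left translation enabled, defer the attach */
		if (translation_pre_enabled(iommu))
			dev_data->defer_attach = true;
	}

	return dev_data;
}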
339 struct iommu_dev_data *dev_data; in pdev_pri_erratum() local
341 dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_pri_erratum()
343 return dev_data->errata & (1 << erratum) ? true : false; in pdev_pri_erratum()
373 struct iommu_dev_data *dev_data; in iommu_init_device() local
383 dev_data = find_dev_data(devid); in iommu_init_device()
384 if (!dev_data) in iommu_init_device()
387 dev_data->pdev = setup_aliases(dev); in iommu_init_device()
399 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
400 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
403 dev_iommu_priv_set(dev, dev_data); in iommu_init_device()
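iommu_init_device() (lines 373-403) resolves the per-device data by device ID, records the PCI alias handling and IOMMUv2 capability, and stores the pointer in the core's per-device IOMMU private field, which is what every later dev_iommu_priv_get() call in this listing retrieves. A condensed sketch (the devid lookup helper and the error paths are assumptions):

static int iommu_init_device(struct device *dev)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	int devid;

	devid = get_device_id(dev);	/* assumed helper name */
	if (devid < 0)
		return devid;

	dev_data = find_dev_data(devid);
	if (!dev_data)
		return -ENOMEM;

	dev_data->pdev = setup_aliases(dev);

	/*
	 * Cache whether the IOMMU behind this device supports v2 features.
	 * The driver additionally gates this on the device itself advertising
	 * IOMMUv2 capabilities; that check is omitted here.
	 */
	iommu = amd_iommu_rlookup_table[dev_data->devid];
	dev_data->iommu_v2 = iommu->is_iommu_v2;

	dev_iommu_priv_set(dev, dev_data);

	return 0;
}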
424 struct iommu_dev_data *dev_data; in amd_iommu_uninit_device() local
426 dev_data = dev_iommu_priv_get(dev); in amd_iommu_uninit_device()
427 if (!dev_data) in amd_iommu_uninit_device()
430 if (dev_data->domain) in amd_iommu_uninit_device()
490 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_hw_error() local
503 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
505 if (dev_data && __ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
520 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_fault() local
534 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
536 if (dev_data && __ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
552 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_page_fault() local
558 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
560 if (dev_data && __ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
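All three event-report paths (RMP hardware error at 490, RMP fault at 520, IO page fault at 552) follow the same pattern: look up dev_data for the faulting PCI device and gate the log message on the per-device ratelimit state that alloc_dev_data() initialized. Roughly (the message text and the fallback branch are assumptions; the other variables come from the decoded event log entry):

	dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data && __ratelimit(&dev_data->rs)) {
		/* per-device ratelimit: a noisy device cannot flood the log */
		pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
			domain_id, address, flags);
	} else if (printk_ratelimit()) {
		/* no dev_data (or suppressed): fall back to a global ratelimit */
		pr_err("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domain_id, address, flags);
	}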
1242 static int device_flush_iotlb(struct iommu_dev_data *dev_data, in device_flush_iotlb() argument
1249 qdep = dev_data->ats.qdep; in device_flush_iotlb()
1250 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1252 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
1267 static int device_flush_dte(struct iommu_dev_data *dev_data) in device_flush_dte() argument
1273 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1275 if (dev_data->pdev) in device_flush_dte()
1276 ret = pci_for_each_dma_alias(dev_data->pdev, in device_flush_dte()
1279 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1283 alias = amd_iommu_alias_table[dev_data->devid]; in device_flush_dte()
1284 if (alias != dev_data->devid) { in device_flush_dte()
1290 if (dev_data->ats.enabled) in device_flush_dte()
1291 ret = device_flush_iotlb(dev_data, 0, ~0UL); in device_flush_dte()
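device_flush_dte() (lines 1267-1291) invalidates the device table entry for the device itself, for each of its PCI DMA aliases, and for the IVRS-declared alias, then flushes the device's remote IOTLB if ATS is enabled. A sketch assembled from the hits (the device_flush_dte_alias() callback name and the intermediate error checks are assumptions):

static int device_flush_dte(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu;
	u16 alias;
	int ret;

	iommu = amd_iommu_rlookup_table[dev_data->devid];

	if (dev_data->pdev)
		ret = pci_for_each_dma_alias(dev_data->pdev,
					     device_flush_dte_alias, iommu);
	else
		ret = iommu_flush_dte(iommu, dev_data->devid);
	if (ret)
		return ret;

	/* the IVRS tables may declare an extra alias for this device */
	alias = amd_iommu_alias_table[dev_data->devid];
	if (alias != dev_data->devid) {
		ret = iommu_flush_dte(iommu, alias);
		if (ret)
			return ret;
	}

	/* ATS devices cache translations; invalidate the full range */
	if (dev_data->ats.enabled)
		ret = device_flush_iotlb(dev_data, 0, ~0UL);

	return ret;
}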
1304 struct iommu_dev_data *dev_data; in __domain_flush_pages() local
1321 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1323 if (!dev_data->ats.enabled) in __domain_flush_pages()
1326 ret |= device_flush_iotlb(dev_data, address, size); in __domain_flush_pages()
1380 struct iommu_dev_data *dev_data; in domain_flush_devices() local
1382 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1383 device_flush_dte(dev_data); in domain_flush_devices()
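The domain-wide flush helpers walk domain->dev_list, which do_attach()/do_detach() below maintain. __domain_flush_pages() (lines 1304-1326) only sends per-device IOTLB invalidations to ATS-enabled devices, while domain_flush_devices() (lines 1380-1383) flushes every attached device's DTE. The two loops, as fragments (the IOMMU-side page invalidation that precedes the first loop is omitted):

	/* __domain_flush_pages(): remote IOTLBs only exist behind ATS */
	list_for_each_entry(dev_data, &domain->dev_list, list) {
		if (!dev_data->ats.enabled)
			continue;

		ret |= device_flush_iotlb(dev_data, address, size);
	}

	/* domain_flush_devices(): invalidate every attached device's DTE */
	list_for_each_entry(dev_data, &domain->dev_list, list)
		device_flush_dte(dev_data);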
1980 static void do_attach(struct iommu_dev_data *dev_data, in do_attach() argument
1987 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1988 ats = dev_data->ats.enabled; in do_attach()
1991 dev_data->domain = domain; in do_attach()
1992 list_add(&dev_data->list, &domain->dev_list); in do_attach()
2000 set_dte_entry(dev_data->devid, domain, &pgtable, in do_attach()
2001 ats, dev_data->iommu_v2); in do_attach()
2002 clone_aliases(dev_data->pdev); in do_attach()
2004 device_flush_dte(dev_data); in do_attach()
2007 static void do_detach(struct iommu_dev_data *dev_data) in do_detach() argument
2009 struct protection_domain *domain = dev_data->domain; in do_detach()
2012 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
2015 dev_data->domain = NULL; in do_detach()
2016 list_del(&dev_data->list); in do_detach()
2017 clear_dte_entry(dev_data->devid); in do_detach()
2018 clone_aliases(dev_data->pdev); in do_detach()
2021 device_flush_dte(dev_data); in do_detach()
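do_attach()/do_detach() (lines 1980-2021) are the low-level link and unlink steps: point dev_data->domain at the protection domain, add or remove the entry on domain->dev_list, program or clear the device table entry (carrying the ATS and IOMMUv2 flags cached in dev_data), propagate the DTE to the device's aliases, and flush it. A paired sketch; the per-IOMMU reference counting, the domain TLB flush on detach, and the page-table handle plumbing are simplified, and amd_iommu_domain_get_pgtable() is an assumed helper:

static void do_attach(struct iommu_dev_data *dev_data,
		      struct protection_domain *domain)
{
	struct domain_pgtable pgtable;
	bool ats = dev_data->ats.enabled;

	/* link the device into the domain */
	dev_data->domain = domain;
	list_add(&dev_data->list, &domain->dev_list);

	/* write the DTE for the device and all of its aliases */
	amd_iommu_domain_get_pgtable(domain, &pgtable);	/* assumed */
	set_dte_entry(dev_data->devid, domain, &pgtable,
		      ats, dev_data->iommu_v2);
	clone_aliases(dev_data->pdev);

	device_flush_dte(dev_data);
}

static void do_detach(struct iommu_dev_data *dev_data)
{
	struct protection_domain *domain = dev_data->domain;

	/* unlink, clear the DTE, and make the change visible to hardware */
	dev_data->domain = NULL;
	list_del(&dev_data->list);
	clear_dte_entry(dev_data->devid);
	clone_aliases(dev_data->pdev);

	device_flush_dte(dev_data);
}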
2110 struct iommu_dev_data *dev_data; in attach_device() local
2117 dev_data = dev_iommu_priv_get(dev); in attach_device()
2119 spin_lock(&dev_data->lock); in attach_device()
2122 if (dev_data->domain != NULL) in attach_device()
2136 if (dev_data->iommu_v2) { in attach_device()
2140 dev_data->ats.enabled = true; in attach_device()
2141 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2142 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in attach_device()
2146 dev_data->ats.enabled = true; in attach_device()
2147 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2153 do_attach(dev_data, domain); in attach_device()
2165 spin_unlock(&dev_data->lock); in attach_device()
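attach_device() (lines 2110-2165) takes the per-device lock, refuses to attach a device that already has a domain, and enables the DMA-side features before do_attach() writes the DTE: devices going into an IOMMUv2 domain get ATS plus PRI/PASID (whether PRG responses need a PASID TLP prefix is cached in pri_tlp), plain ATS-capable devices just get ATS and their invalidation queue depth recorded. A fragment of that decision; pdev_iommuv2_enable() is an assumed helper, and the surrounding domain lock and error unwinding are omitted:

	spin_lock(&dev_data->lock);

	ret = -EBUSY;
	if (dev_data->domain != NULL)
		goto out;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (domain->flags & PD_IOMMUV2_MASK) {
			/* real driver fails the attach if this setup fails; simplified */
			if (dev_data->iommu_v2 && pdev_iommuv2_enable(pdev) == 0) {
				/* IOMMUv2 path: ATS + PRI + PASID */
				dev_data->ats.enabled = true;
				dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
				dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
			}
		} else if (amd_iommu_iotlb_sup &&
			   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
			/* plain ATS: remember the queue depth for invalidations */
			dev_data->ats.enabled = true;
			dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
		}
	}

	ret = 0;
	do_attach(dev_data, domain);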
2178 struct iommu_dev_data *dev_data; in detach_device() local
2181 dev_data = dev_iommu_priv_get(dev); in detach_device()
2182 domain = dev_data->domain; in detach_device()
2186 spin_lock(&dev_data->lock); in detach_device()
2194 if (WARN_ON(!dev_data->domain)) in detach_device()
2197 do_detach(dev_data); in detach_device()
2202 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
2204 else if (dev_data->ats.enabled) in detach_device()
2207 dev_data->ats.enabled = false; in detach_device()
2210 spin_unlock(&dev_data->lock); in detach_device()
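detach_device() (lines 2178-2210) mirrors that setup: under the same locks it calls do_detach() and then turns off whatever attach_device() enabled, IOMMUv2/PRI for v2 devices in a v2 domain, plain ATS otherwise, before clearing the cached flag. A fragment (pdev_iommuv2_disable() is an assumed helper name):

	do_detach(dev_data);

	if (!dev_is_pci(dev))
		goto out;

	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
		pdev_iommuv2_disable(to_pci_dev(dev));	/* assumed */
	else if (dev_data->ats.enabled)
		pci_disable_ats(to_pci_dev(dev));

	dev_data->ats.enabled = false;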
2310 struct iommu_dev_data *dev_data; in update_device_table() local
2312 list_for_each_entry(dev_data, &domain->dev_list, list) { in update_device_table()
2313 set_dte_entry(dev_data->devid, domain, pgtable, in update_device_table()
2314 dev_data->ats.enabled, dev_data->iommu_v2); in update_device_table()
2315 clone_aliases(dev_data->pdev); in update_device_table()
2515 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_detach_device() local
2526 if (dev_data->domain != NULL) in amd_iommu_detach_device()
2536 dev_data->use_vapic = 0; in amd_iommu_detach_device()
2546 struct iommu_dev_data *dev_data; in amd_iommu_attach_device() local
2553 dev_data = dev_iommu_priv_get(dev); in amd_iommu_attach_device()
2554 dev_data->defer_attach = false; in amd_iommu_attach_device()
2556 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
2560 if (dev_data->domain) in amd_iommu_attach_device()
2568 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2570 dev_data->use_vapic = 0; in amd_iommu_attach_device()
2712 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_is_attach_deferred() local
2714 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
2737 struct iommu_dev_data *dev_data; in amd_iommu_def_domain_type() local
2739 dev_data = dev_iommu_priv_get(dev); in amd_iommu_def_domain_type()
2740 if (!dev_data) in amd_iommu_def_domain_type()
2748 if (!mem_encrypt_active() && dev_data->iommu_v2) in amd_iommu_def_domain_type()
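amd_iommu_def_domain_type() (lines 2737-2748) is where the iommu_v2 flag feeds back into the IOMMU core: when memory encryption is not active, IOMMUv2-capable devices are steered toward an identity default domain so the v2 (PASID) path can be used. A sketch of the likely shape (the return values beyond the visible condition are assumptions):

static int amd_iommu_def_domain_type(struct device *dev)
{
	struct iommu_dev_data *dev_data;

	dev_data = dev_iommu_priv_get(dev);
	if (!dev_data)
		return 0;	/* no preference, let the core decide */

	/*
	 * With SME/SEV active, identity mapping can break devices that lack
	 * the encryption bit in their DMA mask, so do not force it then.
	 */
	if (!mem_encrypt_active() && dev_data->iommu_v2)
		return IOMMU_DOMAIN_IDENTITY;

	return 0;
}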
2873 struct iommu_dev_data *dev_data; in __flush_pasid() local
2899 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
2907 if (!dev_data->ats.enabled) in __flush_pasid()
2910 qdep = dev_data->ats.qdep; in __flush_pasid()
2911 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
2913 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
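__flush_pasid() (lines 2873-2913) repeats the ATS pattern from __domain_flush_pages(), but keyed by PASID: after the IOMMU-side TLB flush it walks the domain's device list and builds an IOTLB invalidation command for each ATS-enabled device. The loop body, roughly (iommu_queue_command() is the assumed name of the command-buffer submit helper):

	list_for_each_entry(dev_data, &domain->dev_list, list) {
		struct iommu_cmd cmd;
		struct amd_iommu *iommu;
		int qdep;

		/* devices without ATS hold no translations to invalidate */
		if (!dev_data->ats.enabled)
			continue;

		qdep  = dev_data->ats.qdep;
		iommu = amd_iommu_rlookup_table[dev_data->devid];

		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
				      qdep, address, size);

		ret = iommu_queue_command(iommu, &cmd);	/* assumed */
		if (ret != 0)
			goto out;
	}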
3073 struct iommu_dev_data *dev_data; in amd_iommu_complete_ppr() local
3077 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_complete_ppr()
3078 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3080 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
3081 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
3090 struct iommu_dev_data *dev_data; in amd_iommu_get_v2_domain() local
3097 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_get_v2_domain()
3098 pdomain = dev_data->domain; in amd_iommu_get_v2_domain()
3101 if (pdomain == NULL && dev_data->defer_attach) { in amd_iommu_get_v2_domain()
3102 dev_data->defer_attach = false; in amd_iommu_get_v2_domain()
3123 struct iommu_dev_data *dev_data; in amd_iommu_enable_device_erratum() local
3128 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_enable_device_erratum()
3129 dev_data->errata |= (1 << erratum); in amd_iommu_enable_device_erratum()
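The per-device errata handling is just a bitmask in dev_data: amd_iommu_enable_device_erratum() (lines 3123-3129) sets a bit, and pdev_pri_erratum() (line 339 above) tests it before PRI is enabled for the device. Both ends, sketched (the v2-support guard is an assumption):

void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
{
	struct iommu_dev_data *dev_data;

	if (!amd_iommu_v2_supported())	/* assumed guard */
		return;

	dev_data = dev_iommu_priv_get(&pdev->dev);
	dev_data->errata |= (1 << erratum);
}

static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
	struct iommu_dev_data *dev_data;

	dev_data = dev_iommu_priv_get(&pdev->dev);

	return dev_data->errata & (1 << erratum) ? true : false;
}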
3967 struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid); in amd_ir_set_vcpu_affinity() local
3973 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()