Lines matching refs: dev_state
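(These are cross-reference hits for the dev_state identifier in the AMD IOMMU v2 driver, drivers/iommu/amd/iommu_v2.c. The leading numbers are line numbers in that file; the trailing annotations, local / argument / member, classify each reference. The short sketches between the groups below reconstruct how the referenced functions fit together; they follow the mainline driver but are illustrative rather than verbatim.)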

69 	struct device_state *dev_state;  member
84 static void free_pasid_states(struct device_state *dev_state);
88 struct device_state *dev_state; in __get_device_state() local
90 list_for_each_entry(dev_state, &state_list, list) { in __get_device_state()
91 if (dev_state->sbdf == sbdf) in __get_device_state()
92 return dev_state; in __get_device_state()
100 struct device_state *dev_state; in get_device_state() local
104 dev_state = __get_device_state(sbdf); in get_device_state()
105 if (dev_state != NULL) in get_device_state()
106 atomic_inc(&dev_state->count); in get_device_state()
109 return dev_state; in get_device_state()
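
Taken together, lines 88-109 are the lookup pair: __get_device_state() is a bare
list walk over state_list keyed by sbdf, and get_device_state() wraps it with a
refcount bump so the caller's pointer stays valid after the lock drops. A sketch
consistent with the fragments, assuming state_lock is the global spinlock that
protects state_list (the exact mainline code may differ slightly):

/* Caller must hold state_lock */
static struct device_state *__get_device_state(u32 sbdf)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->sbdf == sbdf)
			return dev_state;
	}

	return NULL;
}

static struct device_state *get_device_state(u32 sbdf)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(sbdf);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);	/* pinned until put_device_state() */
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}
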
112 static void free_device_state(struct device_state *dev_state) in free_device_state() argument
117 free_pasid_states(dev_state); in free_device_state()
123 wait_event(dev_state->wq, !atomic_read(&dev_state->count)); in free_device_state()
129 group = iommu_group_get(&dev_state->pdev->dev); in free_device_state()
133 iommu_detach_group(dev_state->domain, group); in free_device_state()
138 iommu_domain_free(dev_state->domain); in free_device_state()
141 kfree(dev_state); in free_device_state()
144 static void put_device_state(struct device_state *dev_state) in put_device_state() argument
146 if (atomic_dec_and_test(&dev_state->count)) in put_device_state()
147 wake_up(&dev_state->wq); in put_device_state()
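
Lines 112-147 implement a refcount-plus-waitqueue teardown: put_device_state()
wakes dev_state->wq when the last reference drops, and free_device_state()
sleeps on that waitqueue (line 123) until the count reaches zero before
detaching the group, freeing the domain, and freeing the structure. A minimal
sketch of the put side, matching the fragments:

static void put_device_state(struct device_state *dev_state)
{
	/* Last reference gone: wake a waiter sleeping in free_device_state() */
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}

Note the ordering inside free_device_state(): PASID states are torn down first
(line 117), because each live PASID binding holds a device_state reference that
would otherwise keep the wait_event() at line 123 sleeping forever.
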
151 static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state, in __get_pasid_state_ptr() argument
157 level = dev_state->pasid_levels; in __get_pasid_state_ptr()
158 root = dev_state->states; in __get_pasid_state_ptr()
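
dev_state->states (line 158) is the root of a small radix table: one zeroed
page of 512 pointers per level, indexed by 9-bit slices of the PASID, with the
depth fixed at init time in dev_state->pasid_levels. A hedged reconstruction of
the walk; GFP_ATOMIC is assumed because callers hold dev_state->lock:

/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  u32 pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {
		/* 9 bits of the PASID per level: 512 pointers fill one page */
		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}
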
184 static int set_pasid_state(struct device_state *dev_state, in set_pasid_state() argument
192 spin_lock_irqsave(&dev_state->lock, flags); in set_pasid_state()
193 ptr = __get_pasid_state_ptr(dev_state, pasid, true); in set_pasid_state()
208 spin_unlock_irqrestore(&dev_state->lock, flags); in set_pasid_state()
213 static void clear_pasid_state(struct device_state *dev_state, u32 pasid) in clear_pasid_state() argument
218 spin_lock_irqsave(&dev_state->lock, flags); in clear_pasid_state()
219 ptr = __get_pasid_state_ptr(dev_state, pasid, true); in clear_pasid_state()
227 spin_unlock_irqrestore(&dev_state->lock, flags); in clear_pasid_state()
230 static struct pasid_state *get_pasid_state(struct device_state *dev_state, in get_pasid_state() argument
236 spin_lock_irqsave(&dev_state->lock, flags); in get_pasid_state()
237 ptr = __get_pasid_state_ptr(dev_state, pasid, false); in get_pasid_state()
247 spin_unlock_irqrestore(&dev_state->lock, flags); in get_pasid_state()
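
The three accessors at lines 184-247 all take dev_state->lock and delegate to
the walk above: set_pasid_state() installs a pointer only into an empty slot,
clear_pasid_state() NULLs it, and get_pasid_state() reads it with alloc=false
(line 237); in the mainline driver the getter also takes a reference on the
found pasid_state before unlocking, which the fragments don't show. One quirk
worth noting: clear_pasid_state() passes alloc=true at line 219, so clearing a
PASID that was never set can allocate intermediate levels just to NULL a leaf.
A sketch of the setter, with illustrative error codes:

static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u32 pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -EBUSY;		/* slot already taken; mainline may differ */
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;
	ret  = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}
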
318 static void free_pasid_states(struct device_state *dev_state) in free_pasid_states() argument
323 for (i = 0; i < dev_state->max_pasids; ++i) { in free_pasid_states()
324 pasid_state = get_pasid_state(dev_state, i); in free_pasid_states()
340 put_device_state(dev_state); in free_pasid_states()
343 if (dev_state->pasid_levels == 2) in free_pasid_states()
344 free_pasid_states_level2(dev_state->states); in free_pasid_states()
345 else if (dev_state->pasid_levels == 1) in free_pasid_states()
346 free_pasid_states_level1(dev_state->states); in free_pasid_states()
348 BUG_ON(dev_state->pasid_levels != 0); in free_pasid_states()
350 free_page((unsigned long)dev_state->states); in free_pasid_states()
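
free_pasid_states() (lines 318-350) sweeps every possible PASID, force-unbinds
the live ones, then frees the table levels; the put_device_state() at line 340
drops the per-binding device_state reference discussed above, and the BUG_ON at
line 348 catches a corrupted level count. A sketch under the assumption that
put_pasid_state(), put_pasid_state_wait() and the two level-freeing helpers
follow the mainline driver:

static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);	/* drop the ref just taken */

		/* Unpublish so the PASID could be re-used */
		clear_pasid_state(dev_state, pasid_state->pasid);

		/* Triggers mn_release() and unbinds the PASID */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state);

		/* Drop the device_state reference held by this binding */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else
		BUG_ON(dev_state->pasid_levels != 0);

	free_page((unsigned long)dev_state->states);
}
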
363 struct device_state *dev_state; in mn_invalidate_range() local
366 dev_state = pasid_state->device_state; in mn_invalidate_range()
369 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, in mn_invalidate_range()
372 amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid); in mn_invalidate_range()
378 struct device_state *dev_state; in mn_release() local
384 dev_state = pasid_state->device_state; in mn_release()
387 if (run_inv_ctx_cb && dev_state->inv_ctx_cb) in mn_release()
388 dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid); in mn_release()
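
Both MMU-notifier callbacks recover the device through the back-pointer stored
at bind time (pasid_state->device_state, line 633). mn_invalidate_range()
flushes a single page if the invalidated span fits in one, otherwise the whole
TLB for that PASID; mn_release() fires the driver's invalidate-context callback
once when the address space goes away. A sketch assuming mn_to_state() is the
usual container_of() helper:

static void mn_invalidate_range(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state = mn_to_state(mn);
	struct device_state *dev_state  = pasid_state->device_state;

	if ((start ^ (end - 1)) < PAGE_SIZE)	/* span within one page? */
		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
				     start);
	else
		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
}

static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state = mn_to_state(mn);
	struct device_state *dev_state  = pasid_state->device_state;
	bool run_inv_ctx_cb             = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);	/* assumed teardown helper */
}
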
408 static void finish_pri_tag(struct device_state *dev_state, in finish_pri_tag() argument
417 amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid, in finish_pri_tag()
429 if (!fault->dev_state->inv_ppr_cb) { in handle_fault_error()
434 status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev, in handle_fault_error()
505 finish_pri_tag(fault->dev_state, fault->state, fault->tag); in do_fault()
516 struct device_state *dev_state; in ppr_notifier() local
544 dev_state = get_device_state(iommu_fault->sbdf); in ppr_notifier()
545 if (dev_state == NULL) in ppr_notifier()
548 pasid_state = get_pasid_state(dev_state, iommu_fault->pasid); in ppr_notifier()
551 amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid, in ppr_notifier()
565 finish_pri_tag(dev_state, pasid_state, tag); in ppr_notifier()
569 fault->dev_state = dev_state; in ppr_notifier()
587 put_device_state(dev_state); in ppr_notifier()
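
The PPR path (lines 408-587) resolves sbdf to dev_state to pasid_state, queues
a struct fault that carries both pointers (line 569), and lets a workqueue
complete the request: do_fault() ends in finish_pri_tag() (line 505), which
replies through amd_iommu_complete_ppr() (line 417). The put_device_state() at
line 587 drops the notifier's own reference unconditionally; a queued fault
stays safe because it pins the pasid_state, which in turn holds the
device_state reference taken at bind. When a fault can't be resolved,
handle_fault_error() consults the driver's inv_ppr_cb. A sketch of that
fallback, assuming set_pri_tag_status() and the AMD_IOMMU_INV_PRI_RSP_* / PPR_*
constants from the mainline driver:

static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag,
				   status == AMD_IOMMU_INV_PRI_RSP_INVALID ?
				   PPR_INVALID : PPR_FAILURE);
		break;
	default:
		BUG();
	}
}
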
601 struct device_state *dev_state; in amd_iommu_bind_pasid() local
612 dev_state = get_device_state(sbdf); in amd_iommu_bind_pasid()
614 if (dev_state == NULL) in amd_iommu_bind_pasid()
618 if (pasid >= dev_state->max_pasids) in amd_iommu_bind_pasid()
633 pasid_state->device_state = dev_state; in amd_iommu_bind_pasid()
644 ret = set_pasid_state(dev_state, pasid_state, pasid); in amd_iommu_bind_pasid()
648 ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid, in amd_iommu_bind_pasid()
666 clear_pasid_state(dev_state, pasid); in amd_iommu_bind_pasid()
676 put_device_state(dev_state); in amd_iommu_bind_pasid()
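
amd_iommu_bind_pasid() (lines 601-676) pins the device state for the lifetime
of the binding (the reference taken at line 612 is only dropped on failure here,
or later by unbind), range-checks the PASID against max_pasids, wires up the
back-pointer and MMU notifier, publishes the pasid_state via set_pasid_state()
(line 644), and only then programs the GCR3 table (line 648); the unwind path
clears the published state again (line 666). A condensed sketch, with the
mainline's extra per-pasid bookkeeping (refcount, waitqueue, invalid flag)
omitted:

int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	int ret;

	dev_state = get_device_state(get_pci_sbdf_id(pdev));
	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	ret = -EINVAL;
	mm = get_task_mm(task);
	if (mm == NULL)
		goto out_free;

	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;	/* back-pointer for the notifiers */
	pasid_state->pasid        = pasid;
	pasid_state->mn.ops       = &iommu_mn;

	ret = mmu_notifier_register(&pasid_state->mn, mm);
	if (ret)
		goto out_drop_mm;

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(mm->pgd));
	if (ret)
		goto out_clear_state;

	mmput(mm);	/* mn_release() tells us when the mm goes away */
	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);
out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);
out_drop_mm:
	mmput(mm);
out_free:
	kfree(pasid_state);
out:
	put_device_state(dev_state);	/* only dropped on failure */
	return ret;
}
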
685 struct device_state *dev_state; in amd_iommu_unbind_pasid() local
694 dev_state = get_device_state(sbdf); in amd_iommu_unbind_pasid()
695 if (dev_state == NULL) in amd_iommu_unbind_pasid()
698 if (pasid >= dev_state->max_pasids) in amd_iommu_unbind_pasid()
701 pasid_state = get_pasid_state(dev_state, pasid); in amd_iommu_unbind_pasid()
711 clear_pasid_state(dev_state, pasid_state->pasid); in amd_iommu_unbind_pasid()
723 put_device_state(dev_state); in amd_iommu_unbind_pasid()
726 put_device_state(dev_state); in amd_iommu_unbind_pasid()
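
amd_iommu_unbind_pasid() (lines 685-726) mirrors the bind: it looks the device
up again (taking a temporary reference at line 694), unpublishes the PASID, and
unregisters the MMU notifier. The two back-to-back put_device_state() calls at
lines 723 and 726 are not a typo: the first drops the temporary reference from
line 694, the second drops the long-lived reference that amd_iommu_bind_pasid()
took at line 612. A sketch of the tail, with put_pasid_state() and
put_pasid_state_wait() assumed from the mainline driver:

	/*
	 * Drop the ref just taken by get_pasid_state() (line 701); the
	 * binding's own reference still pins pasid_state.
	 */
	put_pasid_state(pasid_state);

	/* Unpublish so the PASID can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/* Triggers mn_release(), dropping our hold on the mm */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	/* Wait out readers, then free; ref taken in amd_iommu_bind_pasid() */
	put_pasid_state_wait(pasid_state);

	/* Drop the reference taken in this function (line 694) */
	put_device_state(dev_state);

	/* Drop the reference taken in amd_iommu_bind_pasid() (line 612) */
	put_device_state(dev_state);
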
732 struct device_state *dev_state; in amd_iommu_init_device() local
755 dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL); in amd_iommu_init_device()
756 if (dev_state == NULL) in amd_iommu_init_device()
759 spin_lock_init(&dev_state->lock); in amd_iommu_init_device()
760 init_waitqueue_head(&dev_state->wq); in amd_iommu_init_device()
761 dev_state->pdev = pdev; in amd_iommu_init_device()
762 dev_state->sbdf = sbdf; in amd_iommu_init_device()
765 for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9) in amd_iommu_init_device()
766 dev_state->pasid_levels += 1; in amd_iommu_init_device()
768 atomic_set(&dev_state->count, 1); in amd_iommu_init_device()
769 dev_state->max_pasids = pasids; in amd_iommu_init_device()
772 dev_state->states = (void *)get_zeroed_page(GFP_KERNEL); in amd_iommu_init_device()
773 if (dev_state->states == NULL) in amd_iommu_init_device()
776 dev_state->domain = iommu_domain_alloc(&pci_bus_type); in amd_iommu_init_device()
777 if (dev_state->domain == NULL) in amd_iommu_init_device()
781 dev_state->domain->type = IOMMU_DOMAIN_IDENTITY; in amd_iommu_init_device()
782 amd_iommu_domain_direct_map(dev_state->domain); in amd_iommu_init_device()
784 ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids); in amd_iommu_init_device()
794 ret = iommu_attach_group(dev_state->domain, group); in amd_iommu_init_device()
808 list_add_tail(&dev_state->list, &state_list); in amd_iommu_init_device()
818 iommu_domain_free(dev_state->domain); in amd_iommu_init_device()
821 free_page((unsigned long)dev_state->states); in amd_iommu_init_device()
824 kfree(dev_state); in amd_iommu_init_device()
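
amd_iommu_init_device() (lines 732-824) allocates the device state, derives the
PASID table depth, allocates the root page, and sets up a direct-mapped IOMMUv2
domain before publishing the state on state_list (line 808); the error labels
unwind in reverse (lines 818-824). The depth loop at lines 765-766 just counts
how many 9-bit index slices the largest PASID needs beyond the leaf level.
Annotated, assuming tmp starts as a copy of the pasids argument:

	/*
	 * One zeroed page holds 512 (2^9) pointers, so:
	 *   pasids <=       512  ->  pasid_levels == 0  (leaf page only)
	 *   pasids <= 512 * 512  ->  pasid_levels == 1  (one inner level)
	 *   ...
	 * Shift 9 bits off until (tmp - 1) fits in a single 9-bit index.
	 */
	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

The identity mapping forced at lines 781-782 covers the device's non-PASID
traffic, while PASID-tagged requests are translated through the per-PASID GCR3
tables that amd_iommu_domain_enable_v2() (line 784) sets up.
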
832 struct device_state *dev_state; in amd_iommu_free_device() local
843 dev_state = __get_device_state(sbdf); in amd_iommu_free_device()
844 if (dev_state == NULL) { in amd_iommu_free_device()
849 list_del(&dev_state->list); in amd_iommu_free_device()
853 put_device_state(dev_state); in amd_iommu_free_device()
854 free_device_state(dev_state); in amd_iommu_free_device()
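
amd_iommu_free_device() (lines 832-854) unlinks the state under state_lock so
no new lookups can find it, drops the initial reference from init time (the
count was set to 1 at line 768), and then lets free_device_state() sleep until
any in-flight users drain. A sketch consistent with the fragments:

void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u32 sbdf;

	if (!amd_iommu_v2_supported())
		return;

	sbdf = get_pci_sbdf_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(sbdf);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);	/* no new lookups can find it */

	spin_unlock_irqrestore(&state_lock, flags);

	put_device_state(dev_state);	/* drop the initial reference */
	free_device_state(dev_state);	/* sleeps until remaining refs drain */
}
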
861 struct device_state *dev_state; in amd_iommu_set_invalid_ppr_cb() local
874 dev_state = __get_device_state(sbdf); in amd_iommu_set_invalid_ppr_cb()
875 if (dev_state == NULL) in amd_iommu_set_invalid_ppr_cb()
878 dev_state->inv_ppr_cb = cb; in amd_iommu_set_invalid_ppr_cb()
892 struct device_state *dev_state; in amd_iommu_set_invalidate_ctx_cb() local
905 dev_state = __get_device_state(sbdf); in amd_iommu_set_invalidate_ctx_cb()
906 if (dev_state == NULL) in amd_iommu_set_invalidate_ctx_cb()
909 dev_state->inv_ctx_cb = cb; in amd_iommu_set_invalidate_ctx_cb()
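
The two callback setters (lines 861-909) share one shape: take state_lock, find
the state with __get_device_state(), store the function pointer. A sketch of
the PPR variant; the context-invalidation variant differs only in the field
written (inv_ctx_cb, line 909):

int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u32 sbdf;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	sbdf = get_pci_sbdf_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(sbdf);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;
	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);
	return ret;
}
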
950 struct device_state *dev_state, *next; in amd_iommu_v2_exit() local
967 list_for_each_entry_safe(dev_state, next, &state_list, list) { in amd_iommu_v2_exit()
970 put_device_state(dev_state); in amd_iommu_v2_exit()
971 list_del(&dev_state->list); in amd_iommu_v2_exit()
972 list_add_tail(&dev_state->list, &freelist); in amd_iommu_v2_exit()
981 list_for_each_entry_safe(dev_state, next, &freelist, list) { in amd_iommu_v2_exit()
982 list_del(&dev_state->list); in amd_iommu_v2_exit()
983 free_device_state(dev_state); in amd_iommu_v2_exit()
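
Module exit (lines 950-983) has to be careful about sleeping: device states
still registered are unlinked and have their initial reference dropped under
state_lock, but the actual free is deferred to a private freelist because
free_device_state() sleeps in wait_event(). A sketch following that shape;
iommu_wq and ppr_nb are the driver's workqueue and notifier block, assumed from
context:

static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state, *next;
	unsigned long flags;
	LIST_HEAD(freelist);

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);
	flush_workqueue(iommu_wq);

	spin_lock_irqsave(&state_lock, flags);

	list_for_each_entry_safe(dev_state, next, &state_list, list) {
		WARN_ON_ONCE(1);	/* users should have freed their devices */

		put_device_state(dev_state);	/* drop the initial reference */
		list_del(&dev_state->list);
		list_add_tail(&dev_state->list, &freelist);
	}

	spin_unlock_irqrestore(&state_lock, flags);

	/* free_device_state() sleeps, so run the freelist outside state_lock */
	list_for_each_entry_safe(dev_state, next, &freelist, list) {
		list_del(&dev_state->list);
		free_device_state(dev_state);
	}

	destroy_workqueue(iommu_wq);
}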