Lines Matching full:group
178 * removes the device from the dummy group and cannot be nested.
182 struct iommu_group *group; in vfio_iommu_group_get() local
185 group = iommu_group_get(dev); in vfio_iommu_group_get()
189 * With noiommu enabled, an IOMMU group will be created for a device in vfio_iommu_group_get()
194 if (group || !noiommu || iommu_present(dev->bus)) in vfio_iommu_group_get()
195 return group; in vfio_iommu_group_get()
197 group = iommu_group_alloc(); in vfio_iommu_group_get()
198 if (IS_ERR(group)) in vfio_iommu_group_get()
201 iommu_group_set_name(group, "vfio-noiommu"); in vfio_iommu_group_get()
202 iommu_group_set_iommudata(group, &noiommu, NULL); in vfio_iommu_group_get()
203 ret = iommu_group_add_device(group, dev); in vfio_iommu_group_get()
205 iommu_group_put(group); in vfio_iommu_group_get()
210 * Where to taint? At this point we've added an IOMMU group for a in vfio_iommu_group_get()
218 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n"); in vfio_iommu_group_get()
221 return group; in vfio_iommu_group_get()
225 void vfio_iommu_group_put(struct iommu_group *group, struct device *dev) in vfio_iommu_group_put() argument
228 if (iommu_group_get_iommudata(group) == &noiommu) in vfio_iommu_group_put()
232 iommu_group_put(group); in vfio_iommu_group_put()
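These two helpers are a pair: vfio_iommu_group_get() returns the device's real IOMMU group, or, with noiommu enabled and no IOMMU present, fabricates a single-device "vfio-noiommu" group and taints the kernel; vfio_iommu_group_put() undoes either case. A minimal sketch of how a VFIO bus driver might consume the pair during probe/remove; my_probe() and my_remove() are hypothetical names, not vfio.c code:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/vfio.h>

static int my_probe(struct device *dev)
{
	struct iommu_group *group;

	/* Real IOMMU group, or a freshly allocated noiommu group. */
	group = vfio_iommu_group_get(dev);
	if (!group)
		return -EINVAL;

	/* ... register a vfio device for dev here ... */
	return 0;
}

static void my_remove(struct device *dev, struct iommu_group *group)
{
	/* Removes dev from a noiommu group and drops the group reference. */
	vfio_iommu_group_put(group, dev);
}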
333 * Group minor allocation/free - both called with vfio.group_lock held
335 static int vfio_alloc_group_minor(struct vfio_group *group) in vfio_alloc_group_minor() argument
337 return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL); in vfio_alloc_group_minor()
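Only the allocation half of the minor management appears in the matches; the free half is a one-liner returning the minor to the same IDR. A hedged reconstruction, consistent with the comment at line 333 that both callers hold vfio.group_lock:

static void vfio_free_group_minor(int minor)
{
	/* Caller holds vfio.group_lock, mirroring vfio_alloc_group_minor(). */
	idr_remove(&vfio.group_idr, minor);
}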
347 static void vfio_group_get(struct vfio_group *group);
352 * it's freed via kref. Must support container/group/device being
373 static void vfio_group_unlock_and_free(struct vfio_group *group) in vfio_group_unlock_and_free() argument
378 * that the group is no longer in vfio.group_list. in vfio_group_unlock_and_free()
380 iommu_group_unregister_notifier(group->iommu_group, &group->nb); in vfio_group_unlock_and_free()
381 kfree(group); in vfio_group_unlock_and_free()
385 * Group objects - create, release, get, put, search
389 struct vfio_group *group, *tmp; in vfio_create_group() local
393 group = kzalloc(sizeof(*group), GFP_KERNEL); in vfio_create_group()
394 if (!group) in vfio_create_group()
397 kref_init(&group->kref); in vfio_create_group()
398 INIT_LIST_HEAD(&group->device_list); in vfio_create_group()
399 mutex_init(&group->device_lock); in vfio_create_group()
400 INIT_LIST_HEAD(&group->unbound_list); in vfio_create_group()
401 mutex_init(&group->unbound_lock); in vfio_create_group()
402 atomic_set(&group->container_users, 0); in vfio_create_group()
403 atomic_set(&group->opened, 0); in vfio_create_group()
404 init_waitqueue_head(&group->container_q); in vfio_create_group()
405 group->iommu_group = iommu_group; in vfio_create_group()
407 group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu); in vfio_create_group()
409 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); in vfio_create_group()
411 group->nb.notifier_call = vfio_iommu_group_notifier; in vfio_create_group()
417 * do anything unless it can find the group in vfio.group_list, so in vfio_create_group()
420 ret = iommu_group_register_notifier(iommu_group, &group->nb); in vfio_create_group()
422 kfree(group); in vfio_create_group()
428 /* Did we race creating this group? */ in vfio_create_group()
432 vfio_group_unlock_and_free(group); in vfio_create_group()
437 minor = vfio_alloc_group_minor(group); in vfio_create_group()
439 vfio_group_unlock_and_free(group); in vfio_create_group()
445 group, "%s%d", group->noiommu ? "noiommu-" : "", in vfio_create_group()
449 vfio_group_unlock_and_free(group); in vfio_create_group()
453 group->minor = minor; in vfio_create_group()
454 group->dev = dev; in vfio_create_group()
456 list_add(&group->vfio_next, &vfio.group_list); in vfio_create_group()
460 return group; in vfio_create_group()
466 struct vfio_group *group = container_of(kref, struct vfio_group, kref); in vfio_group_release() local
468 struct iommu_group *iommu_group = group->iommu_group; in vfio_group_release()
470 WARN_ON(!list_empty(&group->device_list)); in vfio_group_release()
471 WARN_ON(group->notifier.head); in vfio_group_release()
474 &group->unbound_list, unbound_next) { in vfio_group_release()
479 device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor)); in vfio_group_release()
480 list_del(&group->vfio_next); in vfio_group_release()
481 vfio_free_group_minor(group->minor); in vfio_group_release()
482 vfio_group_unlock_and_free(group); in vfio_group_release()
486 static void vfio_group_put(struct vfio_group *group) in vfio_group_put() argument
488 kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); in vfio_group_put()
493 struct vfio_group *group; member
502 vfio_group_put(do_work->group); in vfio_group_put_bg()
506 static void vfio_group_schedule_put(struct vfio_group *group) in vfio_group_schedule_put() argument
515 do_work->group = group; in vfio_group_schedule_put()
519 /* Assume group_lock or group reference is held */
520 static void vfio_group_get(struct vfio_group *group) in vfio_group_get() argument
522 kref_get(&group->kref); in vfio_group_get()
527 * sure the group pointer is valid under lock and get a reference.
529 static struct vfio_group *vfio_group_try_get(struct vfio_group *group) in vfio_group_try_get() argument
531 struct vfio_group *target = group; in vfio_group_try_get()
534 list_for_each_entry(group, &vfio.group_list, vfio_next) { in vfio_group_try_get()
535 if (group == target) { in vfio_group_try_get()
536 vfio_group_get(group); in vfio_group_try_get()
538 return group; in vfio_group_try_get()
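vfio_group_put() and vfio_group_try_get() are two halves of one lookup-versus-teardown discipline: kref_put_mutex() acquires vfio.group_lock before the release callback runs, so a lookup that revalidates its pointer against vfio.group_list under that same lock can never hand out a reference to a group whose release is in flight. A generic, self-contained sketch of the pattern; the obj_* names are illustrative, not from vfio.c:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(obj_lock);
static LIST_HEAD(obj_list);

struct obj {
	struct kref kref;
	struct list_head next;
};

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, kref);

	/* kref_put_mutex() acquired obj_lock for us. */
	list_del(&o->next);
	mutex_unlock(&obj_lock);
	kfree(o);
}

static void obj_put(struct obj *o)
{
	kref_put_mutex(&o->kref, obj_release, &obj_lock);
}

/* Revalidate a possibly stale pointer under the lock before taking a ref. */
static struct obj *obj_try_get(struct obj *target)
{
	struct obj *o;

	mutex_lock(&obj_lock);
	list_for_each_entry(o, &obj_list, next) {
		if (o == target) {
			kref_get(&o->kref);
			mutex_unlock(&obj_lock);
			return o;
		}
	}
	mutex_unlock(&obj_lock);
	return NULL;
}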
549 struct vfio_group *group; in vfio_group_get_from_iommu() local
552 list_for_each_entry(group, &vfio.group_list, vfio_next) { in vfio_group_get_from_iommu()
553 if (group->iommu_group == iommu_group) { in vfio_group_get_from_iommu()
554 vfio_group_get(group); in vfio_group_get_from_iommu()
556 return group; in vfio_group_get_from_iommu()
566 struct vfio_group *group; in vfio_group_get_from_minor() local
569 group = idr_find(&vfio.group_idr, minor); in vfio_group_get_from_minor()
570 if (!group) { in vfio_group_get_from_minor()
574 vfio_group_get(group); in vfio_group_get_from_minor()
577 return group; in vfio_group_get_from_minor()
583 struct vfio_group *group; in vfio_group_get_from_dev() local
589 group = vfio_group_get_from_iommu(iommu_group); in vfio_group_get_from_dev()
592 return group; in vfio_group_get_from_dev()
598 /* Device reference always implies a group reference */
611 static struct vfio_device *vfio_group_get_device(struct vfio_group *group, in vfio_group_get_device() argument
616 mutex_lock(&group->device_lock); in vfio_group_get_device()
617 list_for_each_entry(device, &group->device_list, group_next) { in vfio_group_get_device()
619 mutex_unlock(&group->device_lock); in vfio_group_get_device()
623 mutex_unlock(&group->device_lock); in vfio_group_get_device()
630 * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping
636 * then all of the downstream devices will be part of the same IOMMU group as
660 * A vfio group is viable for use by userspace if all devices are in
669 * group. The second is to test if the device exists on the group
675 struct vfio_group *group = data; in vfio_dev_viable() local
681 mutex_lock(&group->unbound_lock); in vfio_dev_viable()
682 list_for_each_entry(unbound, &group->unbound_list, unbound_next) { in vfio_dev_viable()
688 mutex_unlock(&group->unbound_lock); in vfio_dev_viable()
693 device = vfio_group_get_device(group, dev); in vfio_dev_viable()
705 static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) in vfio_group_nb_add_dev() argument
710 device = vfio_group_get_device(group, dev); in vfio_group_nb_add_dev()
717 if (!atomic_read(&group->container_users)) in vfio_group_nb_add_dev()
721 dev_WARN(dev, "Device added to live group %d!\n", in vfio_group_nb_add_dev()
722 iommu_group_id(group->iommu_group)); in vfio_group_nb_add_dev()
727 static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) in vfio_group_nb_verify() argument
729 /* We don't care what happens when the group isn't in use */ in vfio_group_nb_verify()
730 if (!atomic_read(&group->container_users)) in vfio_group_nb_verify()
733 return vfio_dev_viable(dev, group); in vfio_group_nb_verify()
739 struct vfio_group *group = container_of(nb, struct vfio_group, nb); in vfio_iommu_group_notifier() local
745 * risk racing a group being removed. Ignore spurious notifies. in vfio_iommu_group_notifier()
747 group = vfio_group_try_get(group); in vfio_iommu_group_notifier()
748 if (!group) in vfio_iommu_group_notifier()
753 vfio_group_nb_add_dev(group, dev); in vfio_iommu_group_notifier()
765 dev_dbg(dev, "%s: group %d binding to driver\n", __func__, in vfio_iommu_group_notifier()
766 iommu_group_id(group->iommu_group)); in vfio_iommu_group_notifier()
769 dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__, in vfio_iommu_group_notifier()
770 iommu_group_id(group->iommu_group), dev->driver->name); in vfio_iommu_group_notifier()
771 BUG_ON(vfio_group_nb_verify(group, dev)); in vfio_iommu_group_notifier()
774 dev_dbg(dev, "%s: group %d unbinding from driver %s\n", in vfio_iommu_group_notifier()
775 __func__, iommu_group_id(group->iommu_group), in vfio_iommu_group_notifier()
779 dev_dbg(dev, "%s: group %d unbound from driver\n", __func__, in vfio_iommu_group_notifier()
780 iommu_group_id(group->iommu_group)); in vfio_iommu_group_notifier()
782 * XXX An unbound device in a live group is ok, but we'd in vfio_iommu_group_notifier()
789 mutex_lock(&group->unbound_lock); in vfio_iommu_group_notifier()
791 &group->unbound_list, unbound_next) { in vfio_iommu_group_notifier()
798 mutex_unlock(&group->unbound_lock); in vfio_iommu_group_notifier()
803 * If we're the last reference to the group, the group will be in vfio_iommu_group_notifier()
804 * released, which includes unregistering the iommu group notifier. in vfio_iommu_group_notifier()
809 vfio_group_schedule_put(group); in vfio_iommu_group_notifier()
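The notifier ends with vfio_group_schedule_put() (line 809): dropping the final group reference here would call iommu_group_unregister_notifier() from inside the notifier chain and deadlock, so the put is bounced to the system workqueue. A hedged reconstruction of that helper pair, built around the struct whose member shows at line 493 (treat names and details as approximate):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct vfio_group_put_work {
	struct work_struct work;
	struct vfio_group *group;
};

static void vfio_group_put_bg(struct work_struct *work)
{
	struct vfio_group_put_work *do_work =
		container_of(work, struct vfio_group_put_work, work);

	/* Safe now: we are no longer on the notifier call chain. */
	vfio_group_put(do_work->group);
	kfree(do_work);
}

static void vfio_group_schedule_put(struct vfio_group *group)
{
	struct vfio_group_put_work *do_work;

	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
	if (WARN_ON(!do_work))
		return;	/* better to leak the reference than to deadlock */

	INIT_WORK(&do_work->work, vfio_group_put_bg);
	do_work->group = group;
	schedule_work(&do_work->work);
}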
835 struct vfio_group *group; in vfio_register_group_dev() local
848 group = vfio_group_get_from_iommu(iommu_group); in vfio_register_group_dev()
849 if (!group) { in vfio_register_group_dev()
850 group = vfio_create_group(iommu_group); in vfio_register_group_dev()
851 if (IS_ERR(group)) { in vfio_register_group_dev()
853 return PTR_ERR(group); in vfio_register_group_dev()
863 existing_device = vfio_group_get_device(group, device->dev); in vfio_register_group_dev()
865 dev_WARN(device->dev, "Device already exists on group %d\n", in vfio_register_group_dev()
868 vfio_group_put(group); in vfio_register_group_dev()
872 /* Our reference on group is moved to the device */ in vfio_register_group_dev()
873 device->group = group; in vfio_register_group_dev()
878 mutex_lock(&group->device_lock); in vfio_register_group_dev()
879 list_add(&device->group_next, &group->device_list); in vfio_register_group_dev()
880 group->dev_counter++; in vfio_register_group_dev()
881 mutex_unlock(&group->device_lock); in vfio_register_group_dev()
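A driver-side sketch of this registration flow, assuming the vfio_init_group_dev()/vfio_register_group_dev() pairing of this vintage; my_state, my_vfio_ops, and my_probe() are hypothetical:

#include <linux/slab.h>
#include <linux/vfio.h>

struct my_state {
	struct vfio_device vdev;	/* embedded; lifetime owned by driver */
};

static const struct vfio_device_ops my_vfio_ops; /* .open/.release/... elided */

static int my_probe(struct device *dev)
{
	struct my_state *s;
	int ret;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	vfio_init_group_dev(&s->vdev, dev, &my_vfio_ops);
	/* Finds or creates the vfio_group for dev's iommu_group. */
	ret = vfio_register_group_dev(&s->vdev);
	if (ret)
		kfree(s);
	return ret;
}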
896 struct vfio_group *group; in vfio_device_get_from_dev() local
899 group = vfio_group_get_from_dev(dev); in vfio_device_get_from_dev()
900 if (!group) in vfio_device_get_from_dev()
903 device = vfio_group_get_device(group, dev); in vfio_device_get_from_dev()
904 vfio_group_put(group); in vfio_device_get_from_dev()
910 static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, in vfio_device_get_from_name() argument
915 mutex_lock(&group->device_lock); in vfio_device_get_from_name()
916 list_for_each_entry(it, &group->device_list, group_next) { in vfio_device_get_from_name()
934 mutex_unlock(&group->device_lock); in vfio_device_get_from_name()
944 struct vfio_group *group = device->group; in vfio_unregister_group_dev() local
951 * When the device is removed from the group, the group suddenly in vfio_unregister_group_dev()
953 * completes), but it's not present in the group. This is bad news in vfio_unregister_group_dev()
954 * for any external users that need to re-acquire a group reference in vfio_unregister_group_dev()
962 mutex_lock(&group->unbound_lock); in vfio_unregister_group_dev()
963 list_add(&unbound->unbound_next, &group->unbound_list); in vfio_unregister_group_dev()
964 mutex_unlock(&group->unbound_lock); in vfio_unregister_group_dev()
991 mutex_lock(&group->device_lock); in vfio_unregister_group_dev()
993 group->dev_counter--; in vfio_unregister_group_dev()
994 mutex_unlock(&group->device_lock); in vfio_unregister_group_dev()
997 * In order to support multiple devices per group, devices can be in vfio_unregister_group_dev()
998 * plucked from the group while other devices in the group are still in vfio_unregister_group_dev()
999 * in use. The container persists with this group and those remaining in vfio_unregister_group_dev()
1001 * by binding this device to another driver while the group is still in in vfio_unregister_group_dev()
1003 * or potentially the only, device in the group there can be no other in vfio_unregister_group_dev()
1004 * in-use devices in the group. The user has done their due diligence in vfio_unregister_group_dev()
1006 * we need to make sure the group is detached from the container. in vfio_unregister_group_dev()
1010 if (list_empty(&group->device_list)) in vfio_unregister_group_dev()
1011 wait_event(group->container_q, !group->container); in vfio_unregister_group_dev()
1014 vfio_group_put(group); in vfio_unregister_group_dev()
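The wait at line 1011 pairs with __vfio_group_unset_container() below (lines 1310-1311): unregistering the last device blocks until userspace's container attachment is gone. Reduced to a self-contained sketch of the bare handshake (names illustrative):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(container_q);
static void *container;	/* stand-in for group->container */

/* Teardown side: sleep until the container has let go. */
static void wait_for_detach(void)
{
	wait_event(container_q, !container);
}

/* Container side: publish the detach, then wake any waiter. */
static void do_detach(void)
{
	container = NULL;
	wake_up(&container_q);
}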
1078 struct vfio_group *group; in __vfio_container_attach_groups() local
1081 list_for_each_entry(group, &container->group_list, container_next) { in __vfio_container_attach_groups()
1082 ret = driver->ops->attach_group(data, group->iommu_group); in __vfio_container_attach_groups()
1090 list_for_each_entry_continue_reverse(group, &container->group_list, in __vfio_container_attach_groups()
1092 driver->ops->detach_group(data, group->iommu_group); in __vfio_container_attach_groups()
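Lines 1081-1092 are the standard all-or-nothing attach idiom: on failure, list_for_each_entry_continue_reverse() resumes from the failing entry and walks backwards over exactly the entries already attached, undoing each. The generic shape, with do_attach()/do_detach() as illustrative stubs:

#include <linux/list.h>

struct item {
	struct list_head node;
};

static int do_attach(struct item *it) { return 0; }	/* stub */
static void do_detach(struct item *it) { }		/* stub */

static int attach_all(struct list_head *head)
{
	struct item *it;
	int ret;

	list_for_each_entry(it, head, node) {
		ret = do_attach(it);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* Walks back over only the entries that were attached. */
	list_for_each_entry_continue_reverse(it, head, node)
		do_detach(it);
	return ret;
}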
1108 * the group can be assigned to specific users. Therefore, only by in vfio_ioctl_set_iommu()
1109 * adding a group to a container does the user get the privilege of in vfio_ioctl_set_iommu()
1296 * VFIO Group fd, /dev/vfio/$GROUP
1298 static void __vfio_group_unset_container(struct vfio_group *group) in __vfio_group_unset_container() argument
1300 struct vfio_container *container = group->container; in __vfio_group_unset_container()
1308 group->iommu_group); in __vfio_group_unset_container()
1310 group->container = NULL; in __vfio_group_unset_container()
1311 wake_up(&group->container_q); in __vfio_group_unset_container()
1312 list_del(&group->container_next); in __vfio_group_unset_container()
1314 /* Detaching the last group deprivileges a container, remove iommu */ in __vfio_group_unset_container()
1330 * the group, we know that still exists, therefore the only valid
1333 static int vfio_group_unset_container(struct vfio_group *group) in vfio_group_unset_container() argument
1335 int users = atomic_cmpxchg(&group->container_users, 1, 0); in vfio_group_unset_container()
1342 __vfio_group_unset_container(group); in vfio_group_unset_container()
1349 * implicitly removes the group from the container. That is, if the
1350 * group file descriptor is closed, as well as any device file descriptors,
1351 * the group is free.
1353 static void vfio_group_try_dissolve_container(struct vfio_group *group) in vfio_group_try_dissolve_container() argument
1355 if (0 == atomic_dec_if_positive(&group->container_users)) in vfio_group_try_dissolve_container()
1356 __vfio_group_unset_container(group); in vfio_group_try_dissolve_container()
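group->container_users carries the privilege model in a single atomic: vfio_group_set_container() leaves it at 1, each open device fd adds a user, and the two teardown paths above differ only in how they take it to zero. A toy illustration of those two paths; toy_unset() and toy_try_dissolve() are illustrative, not vfio.c:

#include <linux/atomic.h>
#include <linux/errno.h>

/*
 * Explicit VFIO_GROUP_UNSET_CONTAINER: only the group fd's own user may
 * remain, so the count must be exactly 1 to proceed.
 */
static int toy_unset(atomic_t *users)
{
	int old = atomic_cmpxchg(users, 1, 0);

	if (!old)
		return -EINVAL;	/* no container attached */
	if (old != 1)
		return -EBUSY;	/* open device fds still hold users */
	/* ... __vfio_group_unset_container() ... */
	return 0;
}

/*
 * Implicit dissolve on fd release: whoever takes the count from 1 to 0
 * (atomic_dec_if_positive() returns the new value) performs the detach.
 */
static void toy_try_dissolve(atomic_t *users)
{
	if (atomic_dec_if_positive(users) == 0) {
		/* ... __vfio_group_unset_container() ... */
	}
}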
1359 static int vfio_group_set_container(struct vfio_group *group, int container_fd) in vfio_group_set_container() argument
1366 if (atomic_read(&group->container_users)) in vfio_group_set_container()
1369 if (group->noiommu && !capable(CAP_SYS_RAWIO)) in vfio_group_set_container()
1389 container->noiommu != group->noiommu) { in vfio_group_set_container()
1397 group->iommu_group); in vfio_group_set_container()
1402 group->container = container; in vfio_group_set_container()
1403 container->noiommu = group->noiommu; in vfio_group_set_container()
1404 list_add(&group->container_next, &container->group_list); in vfio_group_set_container()
1406 /* Get a reference on the container and mark a user within the group */ in vfio_group_set_container()
1408 atomic_inc(&group->container_users); in vfio_group_set_container()
1416 static bool vfio_group_viable(struct vfio_group *group) in vfio_group_viable() argument
1418 return (iommu_group_for_each_dev(group->iommu_group, in vfio_group_viable()
1419 group, vfio_dev_viable) == 0); in vfio_group_viable()
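vfio_group_viable() delegates to vfio_dev_viable() (line 675) for every device in the IOMMU group; iommu_group_for_each_dev() aborts on the first nonzero return and propagates it, so "viable" means no device vetoed. A minimal sketch of that callback contract; my_checker() and the driver-name policy are hypothetical:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/string.h>

/* Return 0 to keep iterating, nonzero to stop with that value. */
static int my_checker(struct device *dev, void *data)
{
	const char *allowed = data;

	if (dev->driver && strcmp(dev->driver->name, allowed))
		return -EINVAL;
	return 0;
}

static bool my_group_viable(struct iommu_group *iommu_group)
{
	char allowed[] = "vfio-pci";

	return iommu_group_for_each_dev(iommu_group, allowed,
					my_checker) == 0;
}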
1422 static int vfio_group_add_container_user(struct vfio_group *group) in vfio_group_add_container_user() argument
1424 if (!atomic_inc_not_zero(&group->container_users)) in vfio_group_add_container_user()
1427 if (group->noiommu) { in vfio_group_add_container_user()
1428 atomic_dec(&group->container_users); in vfio_group_add_container_user()
1431 if (!group->container->iommu_driver || !vfio_group_viable(group)) { in vfio_group_add_container_user()
1432 atomic_dec(&group->container_users); in vfio_group_add_container_user()
1441 static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) in vfio_group_get_device_fd() argument
1448 if (0 == atomic_read(&group->container_users) || in vfio_group_get_device_fd()
1449 !group->container->iommu_driver || !vfio_group_viable(group)) in vfio_group_get_device_fd()
1452 if (group->noiommu && !capable(CAP_SYS_RAWIO)) in vfio_group_get_device_fd()
1455 device = vfio_device_get_from_name(group, buf); in vfio_group_get_device_fd()
1495 atomic_inc(&group->container_users); in vfio_group_get_device_fd()
1499 if (group->noiommu) in vfio_group_get_device_fd()
1522 struct vfio_group *group = filep->private_data; in vfio_group_fops_unl_ioctl() local
1541 if (vfio_group_viable(group)) in vfio_group_fops_unl_ioctl()
1544 if (group->container) in vfio_group_fops_unl_ioctl()
1563 ret = vfio_group_set_container(group, fd); in vfio_group_fops_unl_ioctl()
1567 ret = vfio_group_unset_container(group); in vfio_group_fops_unl_ioctl()
1577 ret = vfio_group_get_device_fd(group, buf); in vfio_group_fops_unl_ioctl()
1588 struct vfio_group *group; in vfio_group_fops_open() local
1591 group = vfio_group_get_from_minor(iminor(inode)); in vfio_group_fops_open()
1592 if (!group) in vfio_group_fops_open()
1595 if (group->noiommu && !capable(CAP_SYS_RAWIO)) { in vfio_group_fops_open()
1596 vfio_group_put(group); in vfio_group_fops_open()
1600 /* Do we need multiple instances of the group open? Seems not. */ in vfio_group_fops_open()
1601 opened = atomic_cmpxchg(&group->opened, 0, 1); in vfio_group_fops_open()
1603 vfio_group_put(group); in vfio_group_fops_open()
1608 if (group->container) { in vfio_group_fops_open()
1609 atomic_dec(&group->opened); in vfio_group_fops_open()
1610 vfio_group_put(group); in vfio_group_fops_open()
1615 if (WARN_ON(group->notifier.head)) in vfio_group_fops_open()
1616 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); in vfio_group_fops_open()
1618 filep->private_data = group; in vfio_group_fops_open()
1625 struct vfio_group *group = filep->private_data; in vfio_group_fops_release() local
1629 vfio_group_try_dissolve_container(group); in vfio_group_fops_release()
1631 atomic_dec(&group->opened); in vfio_group_fops_release()
1633 vfio_group_put(group); in vfio_group_fops_release()
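Lines 1601 and 1631 bracket a classic single-open guard: atomic_cmpxchg(&group->opened, 0, 1) admits exactly one opener, and release decrements so the group fd can be opened again later. The skeleton of the idiom, detached from vfio (illustrative):

#include <linux/atomic.h>
#include <linux/fs.h>

static atomic_t opened = ATOMIC_INIT(0);

static int my_open(struct inode *inode, struct file *filep)
{
	/* Nonzero from cmpxchg means someone already holds it open. */
	if (atomic_cmpxchg(&opened, 0, 1))
		return -EBUSY;
	return 0;
}

static int my_release(struct inode *inode, struct file *filep)
{
	atomic_dec(&opened);
	return 0;
}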
1660 vfio_group_try_dissolve_container(device->group); in vfio_device_fops_release()
1727 * - attaching group(s) to it;
1732 * 2. User space passes a group fd to an external user.
1735 * - the group is initialized;
1739 * the VFIO group from being disposed of before KVM exits.
1745 * vfio_group_put_external_user() to release the VFIO group.
1750 struct vfio_group *group = filep->private_data; in vfio_group_get_external_user() local
1756 ret = vfio_group_add_container_user(group); in vfio_group_get_external_user()
1760 vfio_group_get(group); in vfio_group_get_external_user()
1762 return group; in vfio_group_get_external_user()
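The comment block above (lines 1727-1745) documents the contract that KVM's vfio pseudo-device follows. A hedged sketch of the consumer side, given a group fd handed in from userspace; my_add_group_fd() is a hypothetical name:

#include <linux/err.h>
#include <linux/file.h>
#include <linux/vfio.h>

static int my_add_group_fd(int fd)
{
	struct fd f = fdget(fd);
	struct vfio_group *group;

	if (!f.file)
		return -EBADF;

	/* Fails unless this really is a group fd with a viable container. */
	group = vfio_group_get_external_user(f.file);
	fdput(f);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* ... e.g. record vfio_external_user_iommu_id(group) ... */

	/* Drops the container user and the group reference. */
	vfio_group_put_external_user(group);
	return 0;
}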
1770 * - A VFIO group is associated with the device;
1771 * - IOMMU is set for the group.
1773 * increments the container user counter to prevent the VFIO group
1775 * to the VFIO group.
1777 * When the external user finishes using the VFIO group, it calls
1778 * vfio_group_put_external_user() to release the VFIO group and
1782 * Return an ERR_PTR on failure or a pointer to the VFIO group on success.
1787 struct vfio_group *group; in vfio_group_get_external_user_from_dev() local
1790 group = vfio_group_get_from_dev(dev); in vfio_group_get_external_user_from_dev()
1791 if (!group) in vfio_group_get_external_user_from_dev()
1794 ret = vfio_group_add_container_user(group); in vfio_group_get_external_user_from_dev()
1796 vfio_group_put(group); in vfio_group_get_external_user_from_dev()
1800 return group; in vfio_group_get_external_user_from_dev()
1804 void vfio_group_put_external_user(struct vfio_group *group) in vfio_group_put_external_user() argument
1806 vfio_group_try_dissolve_container(group); in vfio_group_put_external_user()
1807 vfio_group_put(group); in vfio_group_put_external_user()
1814 struct vfio_group *group = filep->private_data; in vfio_external_group_match_file() local
1816 return (filep->f_op == &vfio_group_fops) && (group == test_group); in vfio_external_group_match_file()
1820 int vfio_external_user_iommu_id(struct vfio_group *group) in vfio_external_user_iommu_id() argument
1822 return iommu_group_id(group->iommu_group); in vfio_external_user_iommu_id()
1826 long vfio_external_check_extension(struct vfio_group *group, unsigned long arg) in vfio_external_check_extension() argument
1828 return vfio_ioctl_check_extension(group->container, arg); in vfio_external_check_extension()
1965 struct vfio_group *group; in vfio_pin_pages() local
1975 group = vfio_group_get_from_dev(dev); in vfio_pin_pages()
1976 if (!group) in vfio_pin_pages()
1979 if (group->dev_counter > 1) { in vfio_pin_pages()
1984 ret = vfio_group_add_container_user(group); in vfio_pin_pages()
1988 container = group->container; in vfio_pin_pages()
1992 group->iommu_group, user_pfn, in vfio_pin_pages()
1997 vfio_group_try_dissolve_container(group); in vfio_pin_pages()
2000 vfio_group_put(group); in vfio_pin_pages()
2017 struct vfio_group *group; in vfio_unpin_pages() local
2027 group = vfio_group_get_from_dev(dev); in vfio_unpin_pages()
2028 if (!group) in vfio_unpin_pages()
2031 ret = vfio_group_add_container_user(group); in vfio_unpin_pages()
2035 container = group->container; in vfio_unpin_pages()
2043 vfio_group_try_dissolve_container(group); in vfio_unpin_pages()
2046 vfio_group_put(group); in vfio_unpin_pages()
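vfio_pin_pages()/vfio_unpin_pages() are the device-centric pinning path used by mdev vendor drivers; note the dev_counter check above, which refuses to pin when more than one device shares the group. A hedged usage sketch; my_pin_one() and the single-page setup are illustrative:

#include <linux/iommu.h>
#include <linux/vfio.h>

#define MY_NPAGES 1

static int my_pin_one(struct device *mdev_dev, unsigned long iova_pfn)
{
	unsigned long phys_pfn[MY_NPAGES];
	int ret;

	/* Returns the number of pages pinned, or a negative errno. */
	ret = vfio_pin_pages(mdev_dev, &iova_pfn, MY_NPAGES,
			     IOMMU_READ | IOMMU_WRITE, phys_pfn);
	if (ret != MY_NPAGES)
		return ret < 0 ? ret : -EFAULT;

	/* ... program the device with phys_pfn[0] ... */

	ret = vfio_unpin_pages(mdev_dev, &iova_pfn, MY_NPAGES);
	return ret < 0 ? ret : 0;
}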
2053 * VFIO group.
2057 * so as to prevent the VFIO group from being disposed of in the middle of the call.
2058 * But it can keep the reference to the VFIO group for several calls into
2060 * After the caller finishes using the VFIO group, it must release the
2061 * VFIO group by calling vfio_group_put_external_user().
2063 * @group [in] : VFIO group
2072 int vfio_group_pin_pages(struct vfio_group *group, in vfio_group_pin_pages() argument
2080 if (!group || !user_iova_pfn || !phys_pfn || !npage) in vfio_group_pin_pages()
2083 if (group->dev_counter > 1) in vfio_group_pin_pages()
2089 container = group->container; in vfio_group_pin_pages()
2093 group->iommu_group, user_iova_pfn, in vfio_group_pin_pages()
2103 * Unpin a set of guest IOVA PFNs for a VFIO group.
2107 * so as to prevent the VFIO group from being disposed of in the middle of the call.
2108 * But it can keep the reference to the VFIO group for several calls into
2110 * After the caller finishes using the VFIO group, it must release the
2111 * VFIO group by calling vfio_group_put_external_user().
2113 * @group [in] : VFIO group
2120 int vfio_group_unpin_pages(struct vfio_group *group, in vfio_group_unpin_pages() argument
2127 if (!group || !user_iova_pfn || !npage) in vfio_group_unpin_pages()
2133 container = group->container; in vfio_group_unpin_pages()
2158 * so as to prevent the VFIO group from being disposed of in the middle of the call.
2159 * But it can keep the reference to the VFIO group for several calls into
2161 * After the caller finishes using the VFIO group, it must release the
2162 * VFIO group by calling vfio_group_put_external_user().
2164 * @group [in] : VFIO group
2171 int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, in vfio_dma_rw() argument
2178 if (!group || !data || len <= 0) in vfio_dma_rw()
2181 container = group->container; in vfio_dma_rw()
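vfio_dma_rw() lets kernel code read or write a guest IOVA range through the container's mappings without pinning pages first. A one-function sketch; my_read_guest() is a hypothetical wrapper:

#include <linux/vfio.h>

static int my_read_guest(struct vfio_group *group, dma_addr_t iova,
			 void *buf, size_t len)
{
	/* write=false: copy from the IOVA range into buf. */
	return vfio_dma_rw(group, iova, buf, len, false);
}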
2194 static int vfio_register_iommu_notifier(struct vfio_group *group, in vfio_register_iommu_notifier() argument
2202 ret = vfio_group_add_container_user(group); in vfio_register_iommu_notifier()
2206 container = group->container; in vfio_register_iommu_notifier()
2214 vfio_group_try_dissolve_container(group); in vfio_register_iommu_notifier()
2219 static int vfio_unregister_iommu_notifier(struct vfio_group *group, in vfio_unregister_iommu_notifier() argument
2226 ret = vfio_group_add_container_user(group); in vfio_unregister_iommu_notifier()
2230 container = group->container; in vfio_unregister_iommu_notifier()
2238 vfio_group_try_dissolve_container(group); in vfio_unregister_iommu_notifier()
2243 void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm) in vfio_group_set_kvm() argument
2245 group->kvm = kvm; in vfio_group_set_kvm()
2246 blocking_notifier_call_chain(&group->notifier, in vfio_group_set_kvm()
2251 static int vfio_register_group_notifier(struct vfio_group *group, in vfio_register_group_notifier() argument
2268 ret = vfio_group_add_container_user(group); in vfio_register_group_notifier()
2272 ret = blocking_notifier_chain_register(&group->notifier, nb); in vfio_register_group_notifier()
2278 if (!ret && set_kvm && group->kvm) in vfio_register_group_notifier()
2279 blocking_notifier_call_chain(&group->notifier, in vfio_register_group_notifier()
2280 VFIO_GROUP_NOTIFY_SET_KVM, group->kvm); in vfio_register_group_notifier()
2282 vfio_group_try_dissolve_container(group); in vfio_register_group_notifier()
2287 static int vfio_unregister_group_notifier(struct vfio_group *group, in vfio_unregister_group_notifier() argument
2292 ret = vfio_group_add_container_user(group); in vfio_unregister_group_notifier()
2296 ret = blocking_notifier_chain_unregister(&group->notifier, nb); in vfio_unregister_group_notifier()
2298 vfio_group_try_dissolve_container(group); in vfio_unregister_group_notifier()
2306 struct vfio_group *group; in vfio_register_notifier() local
2312 group = vfio_group_get_from_dev(dev); in vfio_register_notifier()
2313 if (!group) in vfio_register_notifier()
2318 ret = vfio_register_iommu_notifier(group, events, nb); in vfio_register_notifier()
2321 ret = vfio_register_group_notifier(group, events, nb); in vfio_register_notifier()
2327 vfio_group_put(group); in vfio_register_notifier()
2335 struct vfio_group *group; in vfio_unregister_notifier() local
2341 group = vfio_group_get_from_dev(dev); in vfio_unregister_notifier()
2342 if (!group) in vfio_unregister_notifier()
2347 ret = vfio_unregister_iommu_notifier(group, nb); in vfio_unregister_notifier()
2350 ret = vfio_unregister_group_notifier(group, nb); in vfio_unregister_notifier()
2356 vfio_group_put(group); in vfio_unregister_notifier()
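Both notifier flavors funnel through vfio_register_notifier()/vfio_unregister_notifier(), selected by type. A hedged sketch of an mdev driver subscribing to DMA unmaps so it can drop stale pins; my_dma_unmap_cb() and my_register() are hypothetical:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/vfio.h>

static int my_dma_unmap_cb(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		/* ... unpin anything overlapping the unmapped range ... */
		pr_debug("unmap iova 0x%llx size 0x%llx\n",
			 unmap->iova, unmap->size);
	}
	return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_dma_unmap_cb };

static int my_register(struct device *mdev_dev)
{
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	/* Needs an attached container: it takes a container user internally. */
	return vfio_register_notifier(mdev_dev, VFIO_IOMMU_NOTIFY,
				      &events, &my_nb);
}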
2361 struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group) in vfio_group_iommu_domain() argument
2366 if (!group) in vfio_group_iommu_domain()
2369 container = group->container; in vfio_group_iommu_domain()
2373 group->iommu_group); in vfio_group_iommu_domain()
2411 /* /dev/vfio/$GROUP */ in vfio_init()