Lines matching refs: container (drivers/vfio/vfio_iommu_spapr_tce.c)
120 static long tce_iommu_mm_set(struct tce_container *container) in tce_iommu_mm_set() argument
122 if (container->mm) { in tce_iommu_mm_set()
123 if (container->mm == current->mm) in tce_iommu_mm_set()
128 container->mm = current->mm; in tce_iommu_mm_set()
129 atomic_inc(&container->mm->mm_count); in tce_iommu_mm_set()
134 static long tce_iommu_prereg_free(struct tce_container *container, in tce_iommu_prereg_free() argument
139 ret = mm_iommu_put(container->mm, tcemem->mem); in tce_iommu_prereg_free()
149 static long tce_iommu_unregister_pages(struct tce_container *container, in tce_iommu_unregister_pages() argument
159 mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT); in tce_iommu_unregister_pages()
163 list_for_each_entry(tcemem, &container->prereg_list, next) { in tce_iommu_unregister_pages()
173 return tce_iommu_prereg_free(container, tcemem); in tce_iommu_unregister_pages()
176 static long tce_iommu_register_pages(struct tce_container *container, in tce_iommu_register_pages() argument
188 mem = mm_iommu_find(container->mm, vaddr, entries); in tce_iommu_register_pages()
190 list_for_each_entry(tcemem, &container->prereg_list, next) { in tce_iommu_register_pages()
196 ret = mm_iommu_get(container->mm, vaddr, entries, &mem); in tce_iommu_register_pages()
202 mm_iommu_put(container->mm, mem); in tce_iommu_register_pages()
207 list_add(&tcemem->next, &container->prereg_list); in tce_iommu_register_pages()
209 container->enabled = true; in tce_iommu_register_pages()
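
The preregistration helpers listed above (tce_iommu_mm_set, tce_iommu_prereg_free, tce_iommu_unregister_pages, tce_iommu_register_pages) back the v2 VFIO_IOMMU_SPAPR_REGISTER_MEMORY and VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY ioctls. A minimal user-space sketch of driving that path follows; container_fd and the buffer are hypothetical, the structs and ioctl numbers come from <linux/vfio.h>:

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Preregister a page-aligned buffer with a v2 sPAPR TCE container.
 * container_fd is assumed to already use VFIO_SPAPR_TCE_v2_IOMMU. */
static int prereg_buffer(int container_fd, void *buf, size_t size)
{
	struct vfio_iommu_spapr_register_memory reg = {
		.argsz = sizeof(reg),
		.flags = 0,
		.vaddr = (uintptr_t)buf,	/* must be page aligned */
		.size  = size,			/* whole pages */
	};

	/* Served by tce_iommu_register_pages() in the listing above. */
	return ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
}

static int unprereg_buffer(int container_fd, void *buf, size_t size)
{
	struct vfio_iommu_spapr_register_memory reg = {
		.argsz = sizeof(reg),
		.vaddr = (uintptr_t)buf,
		.size  = size,
	};

	/* Served by tce_iommu_unregister_pages(); refused while the region
	 * still backs TCE mappings. */
	return ioctl(container_fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
}

Registration pins and accounts the pages once, so later map calls on a v2 container only translate memory that has already been preregistered.
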
224 static inline bool tce_groups_attached(struct tce_container *container) in tce_groups_attached() argument
226 return !list_empty(&container->group_list); in tce_groups_attached()
229 static long tce_iommu_find_table(struct tce_container *container, in tce_iommu_find_table() argument
235 struct iommu_table *tbl = container->tables[i]; in tce_iommu_find_table()
252 static int tce_iommu_find_free_table(struct tce_container *container) in tce_iommu_find_free_table() argument
257 if (!container->tables[i]) in tce_iommu_find_free_table()
264 static int tce_iommu_enable(struct tce_container *container) in tce_iommu_enable() argument
271 if (container->enabled) in tce_iommu_enable()
303 if (!tce_groups_attached(container)) in tce_iommu_enable()
306 tcegrp = list_first_entry(&container->group_list, in tce_iommu_enable()
315 ret = tce_iommu_mm_set(container); in tce_iommu_enable()
320 ret = try_increment_locked_vm(container->mm, locked); in tce_iommu_enable()
324 container->locked_pages = locked; in tce_iommu_enable()
326 container->enabled = true; in tce_iommu_enable()
331 static void tce_iommu_disable(struct tce_container *container) in tce_iommu_disable() argument
333 if (!container->enabled) in tce_iommu_disable()
336 container->enabled = false; in tce_iommu_disable()
338 BUG_ON(!container->mm); in tce_iommu_disable()
339 decrement_locked_vm(container->mm, container->locked_pages); in tce_iommu_disable()
344 struct tce_container *container; in tce_iommu_open() local
351 container = kzalloc(sizeof(*container), GFP_KERNEL); in tce_iommu_open()
352 if (!container) in tce_iommu_open()
355 mutex_init(&container->lock); in tce_iommu_open()
356 INIT_LIST_HEAD_RCU(&container->group_list); in tce_iommu_open()
357 INIT_LIST_HEAD_RCU(&container->prereg_list); in tce_iommu_open()
359 container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU; in tce_iommu_open()
361 return container; in tce_iommu_open()
364 static int tce_iommu_clear(struct tce_container *container,
367 static void tce_iommu_free_table(struct tce_container *container,
372 struct tce_container *container = iommu_data; in tce_iommu_release() local
376 while (tce_groups_attached(container)) { in tce_iommu_release()
377 tcegrp = list_first_entry(&container->group_list, in tce_iommu_release()
387 struct iommu_table *tbl = container->tables[i]; in tce_iommu_release()
392 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); in tce_iommu_release()
393 tce_iommu_free_table(container, tbl); in tce_iommu_release()
396 while (!list_empty(&container->prereg_list)) { in tce_iommu_release()
399 tcemem = list_first_entry(&container->prereg_list, in tce_iommu_release()
401 WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem)); in tce_iommu_release()
404 tce_iommu_disable(container); in tce_iommu_release()
405 if (container->mm) in tce_iommu_release()
406 mmdrop(container->mm); in tce_iommu_release()
407 mutex_destroy(&container->lock); in tce_iommu_release()
409 kfree(container); in tce_iommu_release()
412 static void tce_iommu_unuse_page(struct tce_container *container, in tce_iommu_unuse_page() argument
421 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, in tce_iommu_prereg_ua_to_hpa() argument
428 mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift); in tce_iommu_prereg_ua_to_hpa()
441 static void tce_iommu_unuse_page_v2(struct tce_container *container, in tce_iommu_unuse_page_v2() argument
452 ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua), in tce_iommu_unuse_page_v2()
463 static int tce_iommu_clear(struct tce_container *container, in tce_iommu_clear() argument
483 if (container->v2) { in tce_iommu_clear()
484 tce_iommu_unuse_page_v2(container, tbl, entry); in tce_iommu_clear()
488 tce_iommu_unuse_page(container, oldhpa); in tce_iommu_clear()
508 static long tce_iommu_build(struct tce_container *container, in tce_iommu_build() argument
535 tce_iommu_unuse_page(container, hpa); in tce_iommu_build()
543 tce_iommu_unuse_page(container, hpa); in tce_iommu_build()
549 tce_iommu_clear(container, tbl, entry, i); in tce_iommu_build()
554 static long tce_iommu_build_v2(struct tce_container *container, in tce_iommu_build_v2() argument
568 ret = tce_iommu_prereg_ua_to_hpa(container, in tce_iommu_build_v2()
590 tce_iommu_unuse_page_v2(container, tbl, entry + i); in tce_iommu_build_v2()
598 tce_iommu_unuse_page_v2(container, tbl, entry + i); in tce_iommu_build_v2()
606 tce_iommu_clear(container, tbl, entry, i); in tce_iommu_build_v2()
611 static long tce_iommu_create_table(struct tce_container *container, in tce_iommu_create_table() argument
626 ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT); in tce_iommu_create_table()
639 static void tce_iommu_free_table(struct tce_container *container, in tce_iommu_free_table() argument
645 decrement_locked_vm(container->mm, pages); in tce_iommu_free_table()
648 static long tce_iommu_create_window(struct tce_container *container, in tce_iommu_create_window() argument
657 num = tce_iommu_find_free_table(container); in tce_iommu_create_window()
662 tcegrp = list_first_entry(&container->group_list, in tce_iommu_create_window()
677 ret = tce_iommu_create_table(container, table_group, num, in tce_iommu_create_window()
688 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_create_window()
696 container->tables[num] = tbl; in tce_iommu_create_window()
704 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_create_window()
708 tce_iommu_free_table(container, tbl); in tce_iommu_create_window()
713 static long tce_iommu_remove_window(struct tce_container *container, in tce_iommu_remove_window() argument
721 num = tce_iommu_find_table(container, start_addr, &tbl); in tce_iommu_remove_window()
728 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_remove_window()
745 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); in tce_iommu_remove_window()
746 tce_iommu_free_table(container, tbl); in tce_iommu_remove_window()
747 container->tables[num] = NULL; in tce_iommu_remove_window()
752 static long tce_iommu_create_default_window(struct tce_container *container) in tce_iommu_create_default_window() argument
759 if (!container->def_window_pending) in tce_iommu_create_default_window()
762 if (!tce_groups_attached(container)) in tce_iommu_create_default_window()
765 tcegrp = list_first_entry(&container->group_list, in tce_iommu_create_default_window()
771 ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K, in tce_iommu_create_default_window()
776 container->def_window_pending = false; in tce_iommu_create_default_window()
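
tce_iommu_create_window, tce_iommu_remove_window and the deferred default window above service the dynamic DMA window ioctls of the v2 interface. A hedged sketch of creating and removing an extra window from user space; container_fd, the 64K page shift and the 4GB window size are illustrative values:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Ask the container for an additional DMA window; its bus start address
 * is returned through *start_addr on success. */
static int create_ddw(int container_fd, uint64_t *start_addr)
{
	struct vfio_iommu_spapr_tce_create create = {
		.argsz       = sizeof(create),
		.page_shift  = 16,		/* 64K IOMMU pages */
		.window_size = 1ULL << 32,	/* 4GB of IOVA space */
		.levels      = 1,
	};
	int ret;

	/* Served by tce_iommu_create_window() in the listing. */
	ret = ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
	if (!ret)
		*start_addr = create.start_addr;
	return ret;
}

static int remove_ddw(int container_fd, uint64_t start_addr)
{
	struct vfio_iommu_spapr_tce_remove remove = {
		.argsz      = sizeof(remove),
		.start_addr = start_addr,
	};

	/* Served by tce_iommu_remove_window(). */
	return ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
}

As the def_window_pending handling above shows, a remove request with start_addr == 0 while the default window is still pending only cancels that pending window instead of tearing down a real table.
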
784 struct tce_container *container = iommu_data; in tce_iommu_ioctl() local
807 BUG_ON(!container); in tce_iommu_ioctl()
808 if (container->mm && container->mm != current->mm) in tce_iommu_ioctl()
817 if (!tce_groups_attached(container)) in tce_iommu_ioctl()
820 tcegrp = list_first_entry(&container->group_list, in tce_iommu_ioctl()
842 container->v2) { in tce_iommu_ioctl()
866 if (!container->enabled) in tce_iommu_ioctl()
881 ret = tce_iommu_create_default_window(container); in tce_iommu_ioctl()
885 num = tce_iommu_find_table(container, param.iova, &tbl); in tce_iommu_ioctl()
910 if (container->v2) in tce_iommu_ioctl()
911 ret = tce_iommu_build_v2(container, tbl, in tce_iommu_ioctl()
917 ret = tce_iommu_build(container, tbl, in tce_iommu_ioctl()
932 if (!container->enabled) in tce_iommu_ioctl()
948 ret = tce_iommu_create_default_window(container); in tce_iommu_ioctl()
952 num = tce_iommu_find_table(container, param.iova, &tbl); in tce_iommu_ioctl()
964 ret = tce_iommu_clear(container, tbl, in tce_iommu_ioctl()
974 if (!container->v2) in tce_iommu_ioctl()
980 ret = tce_iommu_mm_set(container); in tce_iommu_ioctl()
994 mutex_lock(&container->lock); in tce_iommu_ioctl()
995 ret = tce_iommu_register_pages(container, param.vaddr, in tce_iommu_ioctl()
997 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1004 if (!container->v2) in tce_iommu_ioctl()
1007 if (!container->mm) in tce_iommu_ioctl()
1023 mutex_lock(&container->lock); in tce_iommu_ioctl()
1024 ret = tce_iommu_unregister_pages(container, param.vaddr, in tce_iommu_ioctl()
1026 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1031 if (container->v2) in tce_iommu_ioctl()
1034 mutex_lock(&container->lock); in tce_iommu_ioctl()
1035 ret = tce_iommu_enable(container); in tce_iommu_ioctl()
1036 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1041 if (container->v2) in tce_iommu_ioctl()
1044 mutex_lock(&container->lock); in tce_iommu_ioctl()
1045 tce_iommu_disable(container); in tce_iommu_ioctl()
1046 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1053 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_ioctl()
1065 if (!container->v2) in tce_iommu_ioctl()
1068 ret = tce_iommu_mm_set(container); in tce_iommu_ioctl()
1072 if (!tce_groups_attached(container)) in tce_iommu_ioctl()
1087 mutex_lock(&container->lock); in tce_iommu_ioctl()
1089 ret = tce_iommu_create_default_window(container); in tce_iommu_ioctl()
1091 ret = tce_iommu_create_window(container, in tce_iommu_ioctl()
1096 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1106 if (!container->v2) in tce_iommu_ioctl()
1109 ret = tce_iommu_mm_set(container); in tce_iommu_ioctl()
1113 if (!tce_groups_attached(container)) in tce_iommu_ioctl()
1128 if (container->def_window_pending && !remove.start_addr) { in tce_iommu_ioctl()
1129 container->def_window_pending = false; in tce_iommu_ioctl()
1133 mutex_lock(&container->lock); in tce_iommu_ioctl()
1135 ret = tce_iommu_remove_window(container, remove.start_addr); in tce_iommu_ioctl()
1137 mutex_unlock(&container->lock); in tce_iommu_ioctl()
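
The tce_iommu_ioctl matches above include the VFIO_IOMMU_MAP_DMA and VFIO_IOMMU_UNMAP_DMA paths, which use the same struct vfio_iommu_type1_dma_map/_unmap layouts as the type1 backend; the handler creates the deferred default window first, rejects v1 maps until VFIO_IOMMU_ENABLE has succeeded, and for v2 requires the buffer to be preregistered. A hedged sketch; container_fd, the IOVA and the buffer are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Map a buffer at a fixed IOVA inside an existing DMA window. */
static int map_buffer(int container_fd, void *buf, size_t size, uint64_t iova)
{
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uintptr_t)buf,
		.iova  = iova,
		.size  = size,
	};

	/* Dispatched to tce_iommu_build()/tce_iommu_build_v2() above. */
	return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
}

static int unmap_buffer(int container_fd, uint64_t iova, size_t size)
{
	struct vfio_iommu_type1_dma_unmap unmap = {
		.argsz = sizeof(unmap),
		.iova  = iova,
		.size  = size,
	};

	/* Dispatched to tce_iommu_clear() above. */
	return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
}
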
1146 static void tce_iommu_release_ownership(struct tce_container *container, in tce_iommu_release_ownership() argument
1152 struct iommu_table *tbl = container->tables[i]; in tce_iommu_release_ownership()
1157 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); in tce_iommu_release_ownership()
1161 container->tables[i] = NULL; in tce_iommu_release_ownership()
1165 static int tce_iommu_take_ownership(struct tce_container *container, in tce_iommu_take_ownership() argument
1187 container->tables[i] = table_group->tables[i]; in tce_iommu_take_ownership()
1192 static void tce_iommu_release_ownership_ddw(struct tce_container *container, in tce_iommu_release_ownership_ddw() argument
1208 static long tce_iommu_take_ownership_ddw(struct tce_container *container, in tce_iommu_take_ownership_ddw() argument
1223 struct iommu_table *tbl = container->tables[i]; in tce_iommu_take_ownership_ddw()
1248 struct tce_container *container = iommu_data; in tce_iommu_attach_group() local
1252 mutex_lock(&container->lock); in tce_iommu_attach_group()
1262 if (tce_groups_attached(container) && (!table_group->ops || in tce_iommu_attach_group()
1270 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_attach_group()
1298 if (container->v2) { in tce_iommu_attach_group()
1302 ret = tce_iommu_take_ownership(container, table_group); in tce_iommu_attach_group()
1304 if (!container->v2) { in tce_iommu_attach_group()
1308 ret = tce_iommu_take_ownership_ddw(container, table_group); in tce_iommu_attach_group()
1309 if (!tce_groups_attached(container) && !container->tables[0]) in tce_iommu_attach_group()
1310 container->def_window_pending = true; in tce_iommu_attach_group()
1315 list_add(&tcegrp->next, &container->group_list); in tce_iommu_attach_group()
1322 mutex_unlock(&container->lock); in tce_iommu_attach_group()
1330 struct tce_container *container = iommu_data; in tce_iommu_detach_group() local
1335 mutex_lock(&container->lock); in tce_iommu_detach_group()
1337 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_detach_group()
1357 tce_iommu_release_ownership(container, table_group); in tce_iommu_detach_group()
1359 tce_iommu_release_ownership_ddw(container, table_group); in tce_iommu_detach_group()
1362 mutex_unlock(&container->lock); in tce_iommu_detach_group()
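
tce_iommu_open, tce_iommu_attach_group and tce_iommu_detach_group are reached through the generic VFIO container and group file descriptors rather than called directly. An end-to-end setup sketch under that assumption; the group path is hypothetical and error paths leak file descriptors for brevity:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Open a container and bind one IOMMU group to the sPAPR TCE v2 backend. */
static int open_spapr_container(const char *group_path)
{
	int container_fd, group_fd;

	container_fd = open("/dev/vfio/vfio", O_RDWR);
	if (container_fd < 0)
		return -1;

	group_fd = open(group_path, O_RDWR);	/* e.g. a path like "/dev/vfio/<group>" */
	if (group_fd < 0)
		return -1;

	/* Attach the group to the container ... */
	if (ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd))
		return -1;

	/* ... and select the backend; this reaches tce_iommu_open() and then
	 * tce_iommu_attach_group() in the listing above. */
	if (ioctl(container_fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_v2_IOMMU))
		return -1;

	return container_fd;
}
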