Lines matching refs:encl

24 struct sgx_encl *encl = encl_page->encl; in __sgx_encl_eldu() local
31 page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base); in __sgx_encl_eldu()
33 page_index = PFN_DOWN(encl->size); in __sgx_encl_eldu()
35 ret = sgx_encl_get_backing(encl, page_index, &b); in __sgx_encl_eldu()
71 struct sgx_encl *encl = encl_page->encl; in sgx_encl_eldu() local
86 list_move(&encl_page->va_page->list, &encl->va_pages); in sgx_encl_eldu()
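
The __sgx_encl_eldu()/sgx_encl_eldu() matches above (lines 24-86) cover the reload path for a swapped-out page: the backing-store slot of a regular page follows from its offset inside the enclave, while the SECS page sits just past the last regular slot at PFN_DOWN(encl->size). A minimal sketch of that slot selection follows; the ENCLS[ELDU] mechanics, VA-slot handling and error paths are elided, and the secs_page condition is an assumption about code not shown in the listing. Line 86 then appears to park the page's VA-slot holder back at the front of encl->va_pages once the reload has succeeded.

/*
 * Sketch only: reconstructs the index selection at lines 31-35; the
 * secs_page parameter and everything after sgx_encl_get_backing() are
 * assumptions about code not shown in the listing.
 */
static int __sgx_encl_eldu_sketch(struct sgx_encl_page *encl_page,
				  struct sgx_epc_page *secs_page)
{
	struct sgx_encl *encl = encl_page->encl;
	unsigned long page_index;
	struct sgx_backing b;
	int ret;

	if (secs_page)	/* regular page: slot derived from its enclave offset */
		page_index = PFN_DOWN(encl_page->desc - encl->base);
	else		/* the SECS itself: slot just past the last regular page */
		page_index = PFN_DOWN(encl->size);

	ret = sgx_encl_get_backing(encl, page_index, &b);
	if (ret)
		return ret;

	/* ... ENCLS[ELDU] reloads and re-verifies the page from `b` ... */
	return 0;
}
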
93 static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl, in sgx_encl_load_page() argument
101 entry = xa_load(&encl->page_array, PFN_DOWN(addr)); in sgx_encl_load_page()
121 if (!(encl->secs.epc_page)) { in sgx_encl_load_page()
122 epc_page = sgx_encl_eldu(&encl->secs, NULL); in sgx_encl_load_page()
127 epc_page = sgx_encl_eldu(entry, encl->secs.epc_page); in sgx_encl_load_page()
131 encl->secs_child_cnt++; in sgx_encl_load_page()
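
The sgx_encl_load_page() matches (lines 93-131) show demand loading: the page descriptor is looked up by PFN in encl->page_array, a swapped-out SECS is reloaded first (ELDU with no parent), then the faulting page is reloaded against it and secs_child_cnt is bumped. A sketch assembled from those lines:

/*
 * Sketch only: reconstructs lines 101-131; the resident-page early
 * return, the vm_flags permission check and the error handling are
 * assumptions about code not shown.
 */
static struct sgx_encl_page *sgx_encl_load_page_sketch(struct sgx_encl *encl,
							unsigned long addr,
							unsigned long vm_flags)
{
	struct sgx_encl_page *entry;
	struct sgx_epc_page *epc_page;

	entry = xa_load(&encl->page_array, PFN_DOWN(addr));
	if (!entry)
		return ERR_PTR(-EFAULT);

	/* ... vm_flags vs. page-permission check elided ... */

	if (entry->epc_page)		/* already resident in EPC */
		return entry;

	if (!encl->secs.epc_page) {	/* reload the SECS first */
		epc_page = sgx_encl_eldu(&encl->secs, NULL);
		if (IS_ERR(epc_page))
			return ERR_CAST(epc_page);
	}

	/* sgx_encl_eldu() is assumed to attach the EPC page to `entry`. */
	epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
	if (IS_ERR(epc_page))
		return ERR_CAST(epc_page);

	encl->secs_child_cnt++;

	return entry;
}
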
143 struct sgx_encl *encl; in sgx_vma_fault() local
146 encl = vma->vm_private_data; in sgx_vma_fault()
153 if (unlikely(!encl)) in sgx_vma_fault()
156 mutex_lock(&encl->lock); in sgx_vma_fault()
158 entry = sgx_encl_load_page(encl, addr, vma->vm_flags); in sgx_vma_fault()
160 mutex_unlock(&encl->lock); in sgx_vma_fault()
172 mutex_unlock(&encl->lock); in sgx_vma_fault()
178 mutex_unlock(&encl->lock); in sgx_vma_fault()
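
The sgx_vma_fault() matches (lines 143-178) show the fault handler's locking discipline: the enclave comes from vma->vm_private_data, the page is loaded under encl->lock, and every exit path drops the lock before returning a VM_FAULT_* code. A hedged sketch; the PFN insertion and the exact error mapping are assumptions:

/*
 * Sketch only: the mutex_lock()/mutex_unlock() pairing at lines 156-178;
 * the EBUSY-to-NOPAGE mapping and the vmf_insert_pfn() step are
 * assumptions about code not shown.
 */
static vm_fault_t sgx_vma_fault_sketch(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct sgx_encl *encl = vma->vm_private_data;
	unsigned long addr = (unsigned long)vmf->address;
	struct sgx_encl_page *entry;

	if (unlikely(!encl))
		return VM_FAULT_SIGBUS;

	mutex_lock(&encl->lock);

	entry = sgx_encl_load_page(encl, addr, vma->vm_flags);
	if (IS_ERR(entry)) {
		mutex_unlock(&encl->lock);
		return PTR_ERR(entry) == -EBUSY ? VM_FAULT_NOPAGE :
						  VM_FAULT_SIGBUS;
	}

	/* ... vmf_insert_pfn() maps the EPC page, page ageing is updated ... */

	mutex_unlock(&encl->lock);
	return VM_FAULT_NOPAGE;
}
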
185 struct sgx_encl *encl = vma->vm_private_data; in sgx_vma_open() local
192 if (unlikely(!encl)) in sgx_vma_open()
195 if (sgx_encl_mm_add(encl, vma->vm_mm)) in sgx_vma_open()
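
sgx_vma_open() (lines 185-195) registers the VMA's mm with the enclave; a likely shape, with the failure handling assumed:

/*
 * Sketch only: clearing vm_private_data on failure is an assumption
 * about the branch at line 195.
 */
static void sgx_vma_open_sketch(struct vm_area_struct *vma)
{
	struct sgx_encl *encl = vma->vm_private_data;

	if (unlikely(!encl))
		return;

	/* A duplicated VMA (fork, split) must register its mm too. */
	if (sgx_encl_mm_add(encl, vma->vm_mm))
		vma->vm_private_data = NULL;
}
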
218 int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start, in sgx_encl_may_map() argument
226 XA_STATE(xas, &encl->page_array, PFN_DOWN(start)); in sgx_encl_may_map()
235 mutex_lock(&encl->lock); in sgx_encl_may_map()
247 mutex_unlock(&encl->lock); in sgx_encl_may_map()
251 mutex_lock(&encl->lock); in sgx_encl_may_map()
256 mutex_unlock(&encl->lock); in sgx_encl_may_map()
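
sgx_encl_may_map() (lines 218-256) walks the page xarray over [start, end) and rejects the mapping if any present page grants fewer permissions than the VMA requests; the two lock/unlock pairs above correspond to periodically dropping encl->lock so a large range stays preemptible. A sketch under those assumptions (the vm_max_prot_bits field and the batch size do not appear in the listing):

/*
 * Sketch only: the walk implied by lines 226-256.  vm_max_prot_bits and
 * the 4096-entry batch are assumptions; only the XA_STATE setup and the
 * lock pairing appear in the listing.
 */
int sgx_encl_may_map_sketch(struct sgx_encl *encl, unsigned long start,
			    unsigned long end, unsigned long vm_flags)
{
	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
	XA_STATE(xas, &encl->page_array, PFN_DOWN(start));
	struct sgx_encl_page *page;
	unsigned long count = 0;
	int ret = 0;

	mutex_lock(&encl->lock);
	xas_lock(&xas);
	xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
		/* Reject if the page allows less than the VMA asks for. */
		if (~page->vm_max_prot_bits & vm_prot_bits) {
			ret = -EACCES;
			break;
		}

		/* Drop the locks periodically so long walks stay preemptible. */
		if (!(++count % 4096)) {
			xas_pause(&xas);
			xas_unlock(&xas);
			mutex_unlock(&encl->lock);

			cond_resched();

			mutex_lock(&encl->lock);
			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);
	mutex_unlock(&encl->lock);

	return ret;
}
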
267 static int sgx_encl_debug_read(struct sgx_encl *encl, struct sgx_encl_page *page, in sgx_encl_debug_read() argument
281 static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *page, in sgx_encl_debug_write() argument
297 static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl, in sgx_encl_reserve_page() argument
304 mutex_lock(&encl->lock); in sgx_encl_reserve_page()
306 entry = sgx_encl_load_page(encl, addr, vm_flags); in sgx_encl_reserve_page()
310 mutex_unlock(&encl->lock); in sgx_encl_reserve_page()
314 mutex_unlock(&encl->lock); in sgx_encl_reserve_page()
322 struct sgx_encl *encl = vma->vm_private_data; in sgx_vma_access() local
335 if (!encl) in sgx_vma_access()
338 if (!test_bit(SGX_ENCL_DEBUG, &encl->flags)) in sgx_vma_access()
342 entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK, in sgx_vma_access()
354 ret = sgx_encl_debug_read(encl, entry, align, data); in sgx_vma_access()
360 ret = sgx_encl_debug_write(encl, entry, align, data); in sgx_vma_access()
368 mutex_unlock(&encl->lock); in sgx_vma_access()
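
The sgx_vma_access() group (lines 267-368) is the ptrace() path for debug enclaves: access requires SGX_ENCL_DEBUG, each touched page is pinned via sgx_encl_reserve_page() (lines 297-314, which returns with encl->lock held on success), and the transfer goes through the sgx_encl_debug_read()/sgx_encl_debug_write() wrappers on naturally aligned 8-byte chunks. A simplified sketch; the chunk size and the copy to and from `buf` are assumptions:

/*
 * Sketch only: one 8-byte chunk per iteration, page reserved under
 * encl->lock, EDBGRD/EDBGWR wrappers do the transfer, lock dropped
 * before the next chunk.
 */
static int sgx_vma_access_sketch(struct vm_area_struct *vma, unsigned long addr,
				 void *buf, int len, int write)
{
	struct sgx_encl *encl = vma->vm_private_data;
	struct sgx_encl_page *entry;
	unsigned long align;
	u8 data[8];
	int ret = 0;
	int i;

	if (!encl)
		return -EFAULT;

	/* ptrace() access is only allowed for debug enclaves. */
	if (!test_bit(SGX_ENCL_DEBUG, &encl->flags))
		return -EFAULT;

	for (i = 0; i < len; i += sizeof(data)) {
		entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK,
					      vma->vm_flags);
		if (IS_ERR(entry))
			return PTR_ERR(entry);

		align = ALIGN_DOWN(addr + i, sizeof(data));

		if (write)
			ret = sgx_encl_debug_write(encl, entry, align, data);
		else
			ret = sgx_encl_debug_read(encl, entry, align, data);

		/* ... copy between `data` and `buf` elided ... */

		mutex_unlock(&encl->lock);	/* reserve_page() left it held */

		if (ret)
			return ret;
	}

	return len;
}
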
393 struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount); in sgx_encl_release() local
398 xa_for_each(&encl->page_array, index, entry) { in sgx_encl_release()
408 encl->secs_child_cnt--; in sgx_encl_release()
415 xa_destroy(&encl->page_array); in sgx_encl_release()
417 if (!encl->secs_child_cnt && encl->secs.epc_page) { in sgx_encl_release()
418 sgx_encl_free_epc_page(encl->secs.epc_page); in sgx_encl_release()
419 encl->secs.epc_page = NULL; in sgx_encl_release()
422 while (!list_empty(&encl->va_pages)) { in sgx_encl_release()
423 va_page = list_first_entry(&encl->va_pages, struct sgx_va_page, in sgx_encl_release()
430 if (encl->backing) in sgx_encl_release()
431 fput(encl->backing); in sgx_encl_release()
433 cleanup_srcu_struct(&encl->srcu); in sgx_encl_release()
435 WARN_ON_ONCE(!list_empty(&encl->mm_list)); in sgx_encl_release()
438 WARN_ON_ONCE(encl->secs_child_cnt); in sgx_encl_release()
439 WARN_ON_ONCE(encl->secs.epc_page); in sgx_encl_release()
441 kfree(encl); in sgx_encl_release()
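
sgx_encl_release() (lines 393-441) is the kref destructor; the matches trace the teardown order: free every EPC page still recorded in the xarray, destroy the xarray, release the SECS once no children remain, drain the VA-page list, drop the backing file, and sanity-check the counters before kfree(). Reassembled as a sketch; the reclaimer interaction and the VA-page freeing details are assumptions:

/*
 * Sketch only: the teardown order visible at lines 393-441; pages still
 * held by the reclaimer are not handled here.
 */
static void sgx_encl_release_sketch(struct kref *ref)
{
	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
	struct sgx_va_page *va_page;
	struct sgx_encl_page *entry;
	unsigned long index;

	xa_for_each(&encl->page_array, index, entry) {
		if (entry->epc_page) {
			sgx_encl_free_epc_page(entry->epc_page);
			encl->secs_child_cnt--;
			entry->epc_page = NULL;
		}
		kfree(entry);
	}
	xa_destroy(&encl->page_array);

	if (!encl->secs_child_cnt && encl->secs.epc_page) {
		sgx_encl_free_epc_page(encl->secs.epc_page);
		encl->secs.epc_page = NULL;
	}

	while (!list_empty(&encl->va_pages)) {
		va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
					   list);
		list_del(&va_page->list);
		sgx_encl_free_epc_page(va_page->epc_page);
		kfree(va_page);
	}

	if (encl->backing)
		fput(encl->backing);

	cleanup_srcu_struct(&encl->srcu);

	/* No mm should still reference a dead enclave. */
	WARN_ON_ONCE(!list_empty(&encl->mm_list));
	WARN_ON_ONCE(encl->secs_child_cnt);
	WARN_ON_ONCE(encl->secs.epc_page);

	kfree(encl);
}
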
457 spin_lock(&encl_mm->encl->mm_lock); in sgx_mmu_notifier_release()
458 list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) { in sgx_mmu_notifier_release()
464 spin_unlock(&encl_mm->encl->mm_lock); in sgx_mmu_notifier_release()
467 synchronize_srcu(&encl_mm->encl->srcu); in sgx_mmu_notifier_release()
477 kref_put(&encl_mm->encl->refcount, sgx_encl_release); in sgx_mmu_notifier_free()
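
The sgx_mmu_notifier_release()/sgx_mmu_notifier_free() matches (lines 457-477) show how an exiting mm detaches: its entry is unhooked from encl->mm_list under mm_lock, synchronize_srcu() waits out readers of the SRCU-protected list, and the enclave reference taken at registration is dropped with kref_put(..., sgx_encl_release). A sketch of the release side; the mmu_notifier embedding and the use of mmu_notifier_put() are assumptions:

/*
 * Sketch only: the detach sequence at lines 457-467 plus the final
 * kref_put() at line 477.
 */
static void sgx_mmu_notifier_release_sketch(struct mmu_notifier *mn,
					    struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm =
		container_of(mn, struct sgx_encl_mm, mmu_notifier);
	struct sgx_encl_mm *tmp;
	bool found = false;

	spin_lock(&encl_mm->encl->mm_lock);
	list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) {
		if (tmp == encl_mm) {
			list_del_rcu(&encl_mm->list);
			found = true;
			break;
		}
	}
	spin_unlock(&encl_mm->encl->mm_lock);

	if (found) {
		/* Wait for SRCU readers still walking mm_list ... */
		synchronize_srcu(&encl_mm->encl->srcu);
		/*
		 * ... then let the notifier core free encl_mm, which in turn
		 * drops the enclave reference via kref_put(sgx_encl_release).
		 */
		mmu_notifier_put(mn);
	}
}
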
487 static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl, in sgx_encl_find_mm() argument
494 idx = srcu_read_lock(&encl->srcu); in sgx_encl_find_mm()
496 list_for_each_entry_rcu(tmp, &encl->mm_list, list) { in sgx_encl_find_mm()
503 srcu_read_unlock(&encl->srcu, idx); in sgx_encl_find_mm()
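
sgx_encl_find_mm() (lines 487-503) is the read side of that list: a lockless walk under srcu_read_lock() looking for an entry that already covers the given mm. A near-verbatim sketch; only the tmp->mm comparison is assumed:

/*
 * Sketch only: the SRCU-protected lookup at lines 487-503.
 */
static struct sgx_encl_mm *sgx_encl_find_mm_sketch(struct sgx_encl *encl,
						   struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm = NULL;
	struct sgx_encl_mm *tmp;
	int idx;

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(tmp, &encl->mm_list, list) {
		if (tmp->mm == mm) {
			encl_mm = tmp;
			break;
		}
	}

	srcu_read_unlock(&encl->srcu, idx);

	return encl_mm;
}
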
508 int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm) in sgx_encl_mm_add() argument
525 if (sgx_encl_find_mm(encl, mm)) in sgx_encl_mm_add()
533 kref_get(&encl->refcount); in sgx_encl_mm_add()
534 encl_mm->encl = encl; in sgx_encl_mm_add()
544 spin_lock(&encl->mm_lock); in sgx_encl_mm_add()
545 list_add_rcu(&encl_mm->list, &encl->mm_list); in sgx_encl_mm_add()
548 encl->mm_list_version++; in sgx_encl_mm_add()
549 spin_unlock(&encl->mm_lock); in sgx_encl_mm_add()
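
sgx_encl_mm_add() (lines 508-549) registers a new mm: skip duplicates via sgx_encl_find_mm(), take a reference on the enclave, hook up an mmu notifier, then list_add_rcu() the entry and bump mm_list_version so concurrent walkers notice the change. A sketch with the notifier setup and error handling assumed:

/*
 * Sketch only: the registration path at lines 508-549.  The sgx_encl_mm
 * layout, the notifier ops name and the error handling are assumptions.
 */
int sgx_encl_mm_add_sketch(struct sgx_encl *encl, struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm;
	int ret;

	/* Nothing to do if this mm is already on the list. */
	if (sgx_encl_find_mm(encl, mm))
		return 0;

	encl_mm = kzalloc(sizeof(*encl_mm), GFP_KERNEL);
	if (!encl_mm)
		return -ENOMEM;

	/* The mm keeps the enclave alive until its notifier is freed. */
	kref_get(&encl->refcount);
	encl_mm->encl = encl;
	encl_mm->mm = mm;
	encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops;	/* assumed name */

	ret = mmu_notifier_register(&encl_mm->mmu_notifier, mm);
	if (ret) {
		kref_put(&encl->refcount, sgx_encl_release);
		kfree(encl_mm);
		return ret;
	}

	spin_lock(&encl->mm_lock);
	list_add_rcu(&encl_mm->list, &encl->mm_list);
	/* Force racing list walkers (e.g. the reclaimer) to restart. */
	encl->mm_list_version++;
	spin_unlock(&encl->mm_lock);

	return 0;
}
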
554 static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl, in sgx_encl_get_backing_page() argument
557 struct inode *inode = encl->backing->f_path.dentry->d_inode; in sgx_encl_get_backing_page()
577 int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, in sgx_encl_get_backing() argument
580 pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5); in sgx_encl_get_backing()
584 contents = sgx_encl_get_backing_page(encl, page_index); in sgx_encl_get_backing()
588 pcmd = sgx_encl_get_backing_page(encl, pcmd_index); in sgx_encl_get_backing()
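
sgx_encl_get_backing() (lines 577-588) reveals the backing-store layout: page contents occupy slots 0..PFN_DOWN(encl->size), and PCMD metadata starts one slot later with 32 entries packed per page, hence page_index >> 5. A sketch; the sgx_backing fields and the pcmd_offset math are assumptions consistent with that shift:

/*
 * Sketch only: the backing-store lookup at lines 577-588.
 */
int sgx_encl_get_backing_sketch(struct sgx_encl *encl, unsigned long page_index,
				struct sgx_backing *backing)
{
	/* 32 PCMD entries (128 bytes each) fit in one backing page. */
	pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
	struct page *contents;
	struct page *pcmd;

	contents = sgx_encl_get_backing_page(encl, page_index);
	if (IS_ERR(contents))
		return PTR_ERR(contents);

	pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
	if (IS_ERR(pcmd)) {
		put_page(contents);
		return PTR_ERR(pcmd);
	}

	backing->page_index = page_index;
	backing->contents = contents;
	backing->pcmd = pcmd;
	backing->pcmd_offset = (page_index & 31) * sizeof(struct sgx_pcmd);

	return 0;
}
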
649 struct sgx_encl *encl = page->encl; in sgx_encl_test_and_clear_young() local
657 if (encl != vma->vm_private_data) in sgx_encl_test_and_clear_young()
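
Finally, sgx_encl_test_and_clear_young() (lines 649-657) refuses to age a page through a VMA whose vm_private_data no longer points at the same enclave. A sketch of that guard; the VMA lookup and the PTE walk are assumptions:

/*
 * Sketch only: the guard at line 657; the find_vma() lookup and the
 * accessed-bit handling are assumptions about the elided code.
 */
int sgx_encl_test_and_clear_young_sketch(struct mm_struct *mm,
					 struct sgx_encl_page *page)
{
	unsigned long addr = page->desc & PAGE_MASK;
	struct sgx_encl *encl = page->encl;
	struct vm_area_struct *vma;

	vma = find_vma(mm, addr);
	if (!vma || addr < vma->vm_start)
		return 0;

	/* Never age through a foreign or stale mapping. */
	if (encl != vma->vm_private_data)
		return 0;

	/* ... test-and-clear the accessed bit on the PTE for addr ... */
	return 0;
}
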