Lines matching refs: ne_enclave
Identifier cross-reference for struct ne_enclave in the Nitro Enclaves driver (drivers/virt/nitro_enclaves/ne_misc_dev.c). Each entry gives the source line number, the matching code, and the enclosing function; "argument" and "local" mark how the reference is used.
469 static bool ne_donated_cpu(struct ne_enclave *ne_enclave, unsigned int cpu) in ne_donated_cpu() argument
471 if (cpumask_test_cpu(cpu, ne_enclave->vcpu_ids)) in ne_donated_cpu()
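ne_donated_cpu() is a plain membership test: CPUs already given to the enclave are tracked in the ne_enclave->vcpu_ids cpumask. A minimal userspace analog of the same check, using glibc's cpu_set_t in place of the kernel cpumask (the function name and set contents below are illustrative, not driver symbols):

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

/* Analog of ne_donated_cpu(): test membership in a CPU set. */
static bool donated_cpu(cpu_set_t *donated_cpus, unsigned int cpu)
{
        return CPU_ISSET(cpu, donated_cpus);
}

int main(void)
{
        cpu_set_t donated_cpus;

        CPU_ZERO(&donated_cpus);
        CPU_SET(2, &donated_cpus);      /* pretend CPU 2 was already added */

        printf("cpu 2 donated: %d\n", donated_cpu(&donated_cpus, 2));
        printf("cpu 3 donated: %d\n", donated_cpu(&donated_cpus, 3));
        return 0;
}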
516 static int ne_set_enclave_threads_per_core(struct ne_enclave *ne_enclave, in ne_set_enclave_threads_per_core() argument
535 if (core_id >= ne_enclave->nr_parent_vm_cores) { in ne_set_enclave_threads_per_core()
543 cpumask_set_cpu(cpu, ne_enclave->threads_per_core[core_id]); in ne_set_enclave_threads_per_core()
562 static int ne_get_cpu_from_cpu_pool(struct ne_enclave *ne_enclave, u32 *vcpu_id) in ne_get_cpu_from_cpu_pool() argument
574 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) in ne_get_cpu_from_cpu_pool()
575 for_each_cpu(cpu, ne_enclave->threads_per_core[i]) in ne_get_cpu_from_cpu_pool()
576 if (!ne_donated_cpu(ne_enclave, cpu)) { in ne_get_cpu_from_cpu_pool()
590 rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, *vcpu_id); in ne_get_cpu_from_cpu_pool()
594 *vcpu_id = cpumask_any(ne_enclave->threads_per_core[core_id]); in ne_get_cpu_from_cpu_pool()
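ne_get_cpu_from_cpu_pool() first scans the sibling masks of cores already reserved for this enclave (threads_per_core[]) for a CPU that has not been donated yet; only if none is free does it pull an unused core out of the NE CPU pool, record its siblings via ne_set_enclave_threads_per_core(), and hand back one of them with cpumask_any(). A rough userspace model of that scan, with toy arrays standing in for the cpumasks (all data and names here are made up):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define NR_CORES   2
#define NR_THREADS 2

/* threads_per_core[i] models the sibling mask of physical core i and
 * donated models ne_enclave->vcpu_ids; all toy data, not driver state. */
static const int threads_per_core[NR_CORES][NR_THREADS] = {
        { 2, 6 },       /* core 0: CPUs 2 and 6 are hyperthread siblings */
        { 3, 7 },       /* core 1: CPUs 3 and 7 are hyperthread siblings */
};

int main(void)
{
        cpu_set_t donated;
        int core, t;

        CPU_ZERO(&donated);
        CPU_SET(2, &donated);   /* CPU 2 was already handed out */

        /* First pool CPU not donated yet; the driver then reserves the
         * whole core, so the sibling (here CPU 6) stays with this enclave. */
        for (core = 0; core < NR_CORES; core++)
                for (t = 0; t < NR_THREADS; t++)
                        if (!CPU_ISSET(threads_per_core[core][t], &donated)) {
                                printf("next vcpu: cpu %d (core %d)\n",
                                       threads_per_core[core][t], core);
                                return 0;
                        }

        printf("no free CPU left in the pool\n");
        return 1;
}

Reserving sibling threads as a unit is what lets the driver enforce the full-core rule later checked in ne_start_enclave_ioctl().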
641 static int ne_check_cpu_in_cpu_pool(struct ne_enclave *ne_enclave, u32 vcpu_id) in ne_check_cpu_in_cpu_pool() argument
647 if (ne_donated_cpu(ne_enclave, vcpu_id)) { in ne_check_cpu_in_cpu_pool()
658 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) in ne_check_cpu_in_cpu_pool()
659 if (cpumask_test_cpu(vcpu_id, ne_enclave->threads_per_core[i])) in ne_check_cpu_in_cpu_pool()
671 rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, vcpu_id); in ne_check_cpu_in_cpu_pool()
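ne_check_cpu_in_cpu_pool() is the path for an explicitly requested CPU id: the CPU must not be donated already and must appear in the pool's sibling masks, and its whole core is then set aside, mirroring the auto-select path above. To pick sibling ids by hand, userspace can read the core topology from sysfs; a sketch assuming the usual topology layout:

#include <stdio.h>

/* Print the hyperthread siblings of a CPU, e.g. "2,6", so that all of
 * them can be requested and the full-core rule is met. */
static int print_siblings(unsigned int cpu)
{
        char path[128], buf[64];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%u/topology/thread_siblings_list",
                 cpu);

        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return -1;
        }

        if (fgets(buf, sizeof(buf), f))
                printf("cpu %u siblings: %s", cpu, buf);

        fclose(f);
        return 0;
}

int main(void)
{
        return print_siblings(2);
}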
695 static int ne_add_vcpu_ioctl(struct ne_enclave *ne_enclave, u32 vcpu_id) in ne_add_vcpu_ioctl() argument
702 if (ne_enclave->mm != current->mm) in ne_add_vcpu_ioctl()
705 slot_add_vcpu_req.slot_uid = ne_enclave->slot_uid; in ne_add_vcpu_ioctl()
718 cpumask_set_cpu(vcpu_id, ne_enclave->vcpu_ids); in ne_add_vcpu_ioctl()
720 ne_enclave->nr_vcpus++; in ne_add_vcpu_ioctl()
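ne_add_vcpu_ioctl() then does the actual work: it refuses callers from a different address space (ne_enclave->mm != current->mm), sends a slot-add-vCPU request for ne_enclave->slot_uid to the NE PCI device, marks the CPU in vcpu_ids, and bumps nr_vcpus. From userspace the whole flow is one NE_ADD_VCPU ioctl on the enclave fd; a hedged sketch assuming the uapi from <linux/nitro_enclaves.h>, where an id of 0 requests auto-selection from the pool:

#include <linux/nitro_enclaves.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Add a vCPU to an existing enclave; enclave_fd comes from NE_CREATE_VM.
 * vcpu_id == 0 lets the driver pick a CPU from the CPU pool; the ioctl
 * is read-write, so the chosen id is written back. */
static int add_vcpu(int enclave_fd, __u64 vcpu_id)
{
        if (ioctl(enclave_fd, NE_ADD_VCPU, &vcpu_id) < 0) {
                perror("NE_ADD_VCPU");
                return -1;
        }

        printf("added vcpu %llu\n", (unsigned long long)vcpu_id);
        return 0;
}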
737 static int ne_sanity_check_user_mem_region(struct ne_enclave *ne_enclave, in ne_sanity_check_user_mem_region() argument
742 if (ne_enclave->mm != current->mm) in ne_sanity_check_user_mem_region()
768 list_for_each_entry(ne_mem_region, &ne_enclave->mem_regions_list, in ne_sanity_check_user_mem_region()
799 static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave, in ne_sanity_check_user_mem_region_page() argument
816 if (ne_enclave->numa_node != page_to_nid(mem_region_page)) { in ne_sanity_check_user_mem_region_page()
819 ne_enclave->numa_node); in ne_sanity_check_user_mem_region_page()
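ne_sanity_check_user_mem_region_page() rejects pages that are not on the enclave's NUMA node, i.e. the node the CPU pool was carved from. One way for userspace to check where a buffer actually landed before handing it to the driver is move_pages() in query mode (a diagnostic aid only, not something the driver requires):

#include <numaif.h>     /* move_pages(); link with -lnuma */
#include <stdio.h>
#include <sys/mman.h>

/* With a NULL nodes array, move_pages() only queries: status receives
 * the NUMA node of the page backing addr instead of migrating it. */
static int page_node(void *addr)
{
        int status = -1;

        if (move_pages(0, 1, &addr, NULL, &status, 0))
                return -1;

        return status;
}

int main(void)
{
        void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;

        *(volatile char *)buf = 1;      /* fault the page in first */
        printf("page is on node %d\n", page_node(buf));
        return 0;
}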
838 static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave, in ne_set_user_memory_region_ioctl() argument
851 rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region); in ne_set_user_memory_region_ioctl()
900 rc = ne_sanity_check_user_mem_region_page(ne_enclave, ne_mem_region->pages[i]); in ne_set_user_memory_region_ioctl()
923 if ((ne_enclave->nr_mem_regions + nr_phys_contig_mem_regions) > in ne_set_user_memory_region_ioctl()
924 ne_enclave->max_mem_regions) { in ne_set_user_memory_region_ioctl()
927 ne_enclave->max_mem_regions); in ne_set_user_memory_region_ioctl()
960 list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list); in ne_set_user_memory_region_ioctl()
966 slot_add_mem_req.slot_uid = ne_enclave->slot_uid; in ne_set_user_memory_region_ioctl()
986 ne_enclave->mem_size += slot_add_mem_req.size; in ne_set_user_memory_region_ioctl()
987 ne_enclave->nr_mem_regions++; in ne_set_user_memory_region_ioctl()
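ne_set_user_memory_region_ioctl() pins the user pages, runs the per-region and per-page sanity checks above, folds physically contiguous pages into larger chunks, and sends one slot-add-memory request per chunk, capped by max_mem_regions. The userspace side is a single NE_SET_USER_MEMORY_REGION ioctl; a sketch assuming the uapi struct ne_user_memory_region and huge-page backed memory (hugetlb pages must have been reserved beforehand, e.g. via vm.nr_hugepages):

#include <linux/nitro_enclaves.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#define REGION_SIZE (2UL * 1024 * 1024)         /* one 2 MiB huge page */

static int add_mem_region(int enclave_fd)
{
        struct ne_user_memory_region region = {};
        void *addr;

        /* Huge-page backed, as the driver's sanity checks expect. */
        addr = mmap(NULL, REGION_SIZE, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (addr == MAP_FAILED) {
                perror("mmap");
                return -1;
        }

        region.flags = NE_DEFAULT_MEMORY_REGION;
        region.memory_size = REGION_SIZE;
        region.userspace_addr = (__u64)(uintptr_t)addr;

        if (ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &region) < 0) {
                perror("NE_SET_USER_MEMORY_REGION");
                munmap(addr, REGION_SIZE);
                return -1;
        }

        return 0;
}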
1016 static int ne_start_enclave_ioctl(struct ne_enclave *ne_enclave, in ne_start_enclave_ioctl() argument
1026 if (!ne_enclave->nr_mem_regions) { in ne_start_enclave_ioctl()
1033 if (ne_enclave->mem_size < NE_MIN_ENCLAVE_MEM_SIZE) { in ne_start_enclave_ioctl()
1041 if (!ne_enclave->nr_vcpus) { in ne_start_enclave_ioctl()
1048 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) in ne_start_enclave_ioctl()
1049 for_each_cpu(cpu, ne_enclave->threads_per_core[i]) in ne_start_enclave_ioctl()
1050 if (!cpumask_test_cpu(cpu, ne_enclave->vcpu_ids)) { in ne_start_enclave_ioctl()
1059 enclave_start_req.slot_uid = ne_enclave->slot_uid; in ne_start_enclave_ioctl()
1071 ne_enclave->state = NE_STATE_RUNNING; in ne_start_enclave_ioctl()
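ne_start_enclave_ioctl() enforces the launch preconditions visible above: at least one memory region, total memory of at least NE_MIN_ENCLAVE_MEM_SIZE, at least one vCPU, and no half-used cores (every thread of every reserved core must have been added as a vCPU). Only then is the start request issued and the state flipped to NE_STATE_RUNNING. A sketch of the matching userspace call, assuming struct ne_enclave_start_info from the uapi:

#include <linux/nitro_enclaves.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int start_enclave(int enclave_fd)
{
        struct ne_enclave_start_info start_info = {};

        /* flags = 0 and enclave_cid = 0 ask for the defaults; on success
         * the driver fills in the vsock CID assigned to the enclave. */
        if (ioctl(enclave_fd, NE_START_ENCLAVE, &start_info) < 0) {
                perror("NE_START_ENCLAVE");
                return -1;
        }

        printf("enclave running, CID %llu\n",
               (unsigned long long)start_info.enclave_cid);
        return 0;
}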
1091 struct ne_enclave *ne_enclave = file->private_data; in ne_enclave_ioctl() local
1101 mutex_lock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1103 if (ne_enclave->state != NE_STATE_INIT) { in ne_enclave_ioctl()
1107 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1112 if (vcpu_id >= (ne_enclave->nr_parent_vm_cores * in ne_enclave_ioctl()
1113 ne_enclave->nr_threads_per_core)) { in ne_enclave_ioctl()
1117 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1124 rc = ne_get_cpu_from_cpu_pool(ne_enclave, &vcpu_id); in ne_enclave_ioctl()
1130 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1136 rc = ne_check_cpu_in_cpu_pool(ne_enclave, vcpu_id); in ne_enclave_ioctl()
1142 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1148 rc = ne_add_vcpu_ioctl(ne_enclave, vcpu_id); in ne_enclave_ioctl()
1150 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1155 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1169 mutex_lock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1171 if (ne_enclave->state != NE_STATE_INIT) { in ne_enclave_ioctl()
1175 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1180 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1213 mutex_lock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1215 if (ne_enclave->state != NE_STATE_INIT) { in ne_enclave_ioctl()
1219 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1224 rc = ne_set_user_memory_region_ioctl(ne_enclave, mem_region); in ne_enclave_ioctl()
1226 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1231 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1293 mutex_lock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1295 if (ne_enclave->state != NE_STATE_INIT) { in ne_enclave_ioctl()
1299 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1304 rc = ne_start_enclave_ioctl(ne_enclave, &enclave_start_info); in ne_enclave_ioctl()
1306 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
1311 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_ioctl()
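Every command handled in ne_enclave_ioctl() follows the same locking discipline: take enclave_info_mutex, fail fast if the enclave has already left NE_STATE_INIT, do the work, and release the mutex on every exit path. A small pthread-based analog of that check-under-lock pattern (purely illustrative; the driver returns NE_ERR_* style error codes rather than -EBUSY):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Userspace analog of ne_enclave_ioctl(): the state check and the state
 * change happen under one mutex, so a started enclave can never race a
 * late add-vCPU or add-memory command. Names here are made up. */
enum state { STATE_INIT, STATE_RUNNING };

struct enclave_like {
        pthread_mutex_t lock;
        enum state state;
};

static int do_command(struct enclave_like *e)
{
        int rc = 0;

        pthread_mutex_lock(&e->lock);

        if (e->state != STATE_INIT) {
                rc = -EBUSY;    /* driver: a NE_ERR_* error code */
                goto out;
        }

        /* ... command body, e.g. add a vCPU or a memory region ... */

out:
        pthread_mutex_unlock(&e->lock);
        return rc;
}

int main(void)          /* build with -pthread */
{
        struct enclave_like e = { PTHREAD_MUTEX_INITIALIZER, STATE_INIT };

        printf("rc = %d\n", do_command(&e));
        e.state = STATE_RUNNING;
        printf("rc = %d\n", do_command(&e));
        return 0;
}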
1334 static void ne_enclave_remove_all_mem_region_entries(struct ne_enclave *ne_enclave) in ne_enclave_remove_all_mem_region_entries() argument
1341 &ne_enclave->mem_regions_list, in ne_enclave_remove_all_mem_region_entries()
1361 static void ne_enclave_remove_all_vcpu_id_entries(struct ne_enclave *ne_enclave) in ne_enclave_remove_all_vcpu_id_entries() argument
1368 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) { in ne_enclave_remove_all_vcpu_id_entries()
1369 for_each_cpu(cpu, ne_enclave->threads_per_core[i]) in ne_enclave_remove_all_vcpu_id_entries()
1373 free_cpumask_var(ne_enclave->threads_per_core[i]); in ne_enclave_remove_all_vcpu_id_entries()
1378 kfree(ne_enclave->threads_per_core); in ne_enclave_remove_all_vcpu_id_entries()
1380 free_cpumask_var(ne_enclave->vcpu_ids); in ne_enclave_remove_all_vcpu_id_entries()
1393 static void ne_pci_dev_remove_enclave_entry(struct ne_enclave *ne_enclave, in ne_pci_dev_remove_enclave_entry() argument
1396 struct ne_enclave *ne_enclave_entry = NULL; in ne_pci_dev_remove_enclave_entry()
1397 struct ne_enclave *ne_enclave_entry_tmp = NULL; in ne_pci_dev_remove_enclave_entry()
1401 if (ne_enclave_entry->slot_uid == ne_enclave->slot_uid) { in ne_pci_dev_remove_enclave_entry()
1423 struct ne_enclave *ne_enclave = file->private_data; in ne_enclave_release() local
1429 if (!ne_enclave) in ne_enclave_release()
1436 if (!ne_enclave->slot_uid) in ne_enclave_release()
1444 mutex_lock(&ne_enclave->enclave_info_mutex); in ne_enclave_release()
1446 if (ne_enclave->state != NE_STATE_INIT && ne_enclave->state != NE_STATE_STOPPED) { in ne_enclave_release()
1447 enclave_stop_request.slot_uid = ne_enclave->slot_uid; in ne_enclave_release()
1462 slot_free_req.slot_uid = ne_enclave->slot_uid; in ne_enclave_release()
1474 ne_pci_dev_remove_enclave_entry(ne_enclave, ne_pci_dev); in ne_enclave_release()
1475 ne_enclave_remove_all_mem_region_entries(ne_enclave); in ne_enclave_release()
1476 ne_enclave_remove_all_vcpu_id_entries(ne_enclave); in ne_enclave_release()
1478 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_release()
1481 kfree(ne_enclave); in ne_enclave_release()
1486 mutex_unlock(&ne_enclave->enclave_info_mutex); in ne_enclave_release()
1504 struct ne_enclave *ne_enclave = file->private_data; in ne_enclave_poll() local
1506 poll_wait(file, &ne_enclave->eventq, wait); in ne_enclave_poll()
1508 if (ne_enclave->has_event) in ne_enclave_poll()
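ne_enclave_poll() adds the caller to the enclave's event queue and reports a pending out-of-band event once has_event is set from the NE PCI device's event handling, for example when the enclave exits; mainline signals this as EPOLLHUP. A matching userspace wait, sketched:

#include <poll.h>
#include <stdio.h>

/* Wait for an out-of-band enclave event; POLLHUP is always reported in
 * revents when pending, so no event bits are requested here. */
static int wait_for_enclave_event(int enclave_fd)
{
        struct pollfd pfd = { .fd = enclave_fd, .events = 0 };

        if (poll(&pfd, 1, -1) < 0) {
                perror("poll");
                return -1;
        }

        if (pfd.revents & POLLHUP)
                printf("enclave posted an event (e.g. it exited)\n");

        return 0;
}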
1542 struct ne_enclave *ne_enclave = NULL; in ne_create_vm_ioctl() local
1564 ne_enclave = kzalloc(sizeof(*ne_enclave), GFP_KERNEL); in ne_create_vm_ioctl()
1565 if (!ne_enclave) in ne_create_vm_ioctl()
1570 ne_enclave->nr_parent_vm_cores = ne_cpu_pool.nr_parent_vm_cores; in ne_create_vm_ioctl()
1571 ne_enclave->nr_threads_per_core = ne_cpu_pool.nr_threads_per_core; in ne_create_vm_ioctl()
1572 ne_enclave->numa_node = ne_cpu_pool.numa_node; in ne_create_vm_ioctl()
1576 ne_enclave->threads_per_core = kcalloc(ne_enclave->nr_parent_vm_cores, in ne_create_vm_ioctl()
1577 sizeof(*ne_enclave->threads_per_core), GFP_KERNEL); in ne_create_vm_ioctl()
1578 if (!ne_enclave->threads_per_core) { in ne_create_vm_ioctl()
1584 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) in ne_create_vm_ioctl()
1585 if (!zalloc_cpumask_var(&ne_enclave->threads_per_core[i], GFP_KERNEL)) { in ne_create_vm_ioctl()
1591 if (!zalloc_cpumask_var(&ne_enclave->vcpu_ids, GFP_KERNEL)) { in ne_create_vm_ioctl()
1607 enclave_file = anon_inode_getfile("ne-vm", &ne_enclave_fops, ne_enclave, O_RDWR); in ne_create_vm_ioctl()
1627 init_waitqueue_head(&ne_enclave->eventq); in ne_create_vm_ioctl()
1628 ne_enclave->has_event = false; in ne_create_vm_ioctl()
1629 mutex_init(&ne_enclave->enclave_info_mutex); in ne_create_vm_ioctl()
1630 ne_enclave->max_mem_regions = cmd_reply.mem_regions; in ne_create_vm_ioctl()
1631 INIT_LIST_HEAD(&ne_enclave->mem_regions_list); in ne_create_vm_ioctl()
1632 ne_enclave->mm = current->mm; in ne_create_vm_ioctl()
1633 ne_enclave->slot_uid = cmd_reply.slot_uid; in ne_create_vm_ioctl()
1634 ne_enclave->state = NE_STATE_INIT; in ne_create_vm_ioctl()
1636 list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list); in ne_create_vm_ioctl()
1638 if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) { in ne_create_vm_ioctl()
1660 free_cpumask_var(ne_enclave->vcpu_ids); in ne_create_vm_ioctl()
1661 for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) in ne_create_vm_ioctl()
1662 free_cpumask_var(ne_enclave->threads_per_core[i]); in ne_create_vm_ioctl()
1663 kfree(ne_enclave->threads_per_core); in ne_create_vm_ioctl()
1665 kfree(ne_enclave); in ne_create_vm_ioctl()
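ne_create_vm_ioctl() ties the listing together: it snapshots the pool geometry (nr_parent_vm_cores, nr_threads_per_core, numa_node), allocates the per-core cpumasks and vcpu_ids, requests a slot from the NE PCI device, wraps the enclave state in an anonymous-inode "ne-vm" file, initializes the mutex, wait queue and memory region list, and copies the slot uid back to the caller; the error paths at 1660-1665 unwind those allocations. An end-to-end userspace sketch over /dev/nitro_enclaves, assuming the same uapi as above:

#include <fcntl.h>
#include <linux/nitro_enclaves.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        __u64 slot_uid = 0;
        __u64 vcpu_id = 0;      /* 0: let the driver pick from the pool */
        int ne_dev_fd, enclave_fd;

        ne_dev_fd = open("/dev/nitro_enclaves", O_RDWR | O_CLOEXEC);
        if (ne_dev_fd < 0) {
                perror("open");
                return 1;
        }

        /* Returns the enclave fd and stores the slot uid, matching the
         * copy_to_user() at line 1638 above. */
        enclave_fd = ioctl(ne_dev_fd, NE_CREATE_VM, &slot_uid);
        if (enclave_fd < 0) {
                perror("NE_CREATE_VM");
                return 1;
        }
        printf("enclave slot %llu\n", (unsigned long long)slot_uid);

        if (ioctl(enclave_fd, NE_ADD_VCPU, &vcpu_id) < 0)
                perror("NE_ADD_VCPU");

        /* ... NE_SET_USER_MEMORY_REGION and NE_START_ENCLAVE as above;
         * closing the fd runs ne_enclave_release() and frees the slot. */
        close(enclave_fd);
        close(ne_dev_fd);
        return 0;
}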