Lines matching full:vm in the ACRN HSM I/O request handling code (drivers/virt/acrn/ioreq.c in the Linux kernel):
39 static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu, in ioreq_complete_request() argument
52 * in which User VMs and Service VM are bound to dedicated CPU cores. in ioreq_complete_request()
64 ret = hcall_notify_req_finish(vm->vmid, vcpu); in ioreq_complete_request()
79 if (vcpu >= client->vm->vcpu_num) in acrn_ioreq_complete_request()
84 acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf; in acrn_ioreq_complete_request()
88 ret = ioreq_complete_request(client->vm, vcpu, acrn_req); in acrn_ioreq_complete_request()
93 int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu) in acrn_ioreq_request_default_complete() argument
97 spin_lock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_request_default_complete()
98 if (vm->default_client) in acrn_ioreq_request_default_complete()
99 ret = acrn_ioreq_complete_request(vm->default_client, in acrn_ioreq_request_default_complete()
101 spin_unlock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_request_default_complete()
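
Read together, the matches above describe the completion path: acrn_ioreq_complete_request() bounds-checks the vCPU index against vm->vcpu_num, falls back to that vCPU's slot in the shared vm->ioreq_buf when the caller passes no request, and ioreq_complete_request() then notifies the hypervisor through hcall_notify_req_finish(vm->vmid, vcpu); the default client is completed the same way under ioreq_clients_lock. A small illustrative helper for the per-vCPU slot lookup (the req_slot indexing also shows up in ioreq_task() at line 192 below; the helper itself is not a function from this file):

/*
 * Illustrative only: locate the shared I/O request slot for one vCPU.
 * Mirrors the bounds check at line 79 and the req_slot indexing used by
 * the worker thread at line 192.
 */
static struct acrn_io_request *demo_vcpu_slot(struct acrn_vm *vm, u16 vcpu)
{
	if (vcpu >= vm->vcpu_num)	/* reject out-of-range vCPU ids */
		return NULL;

	/* One struct acrn_io_request per vCPU in the shared buffer. */
	return vm->ioreq_buf->req_slot + vcpu;
}
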
191 vcpu = find_first_bit(ioreqs_map, client->vm->vcpu_num); in ioreq_task()
192 req = client->vm->ioreq_buf->req_slot + vcpu; in ioreq_task()
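
Lines 191-192 are the core of the per-client worker thread: pick the lowest pending vCPU from a per-client bitmap, then index the matching slot in the shared buffer. A simplified sketch of such a loop follows; client->wq, client->ioreqs_map and client->handler are assumed names that do not appear in a search for vm, so treat this as a shape, not the file's code:

/*
 * Simplified worker loop built around the two matches above.  client->wq,
 * client->ioreqs_map and client->handler are assumed names, used only to
 * make the sketch self-contained.
 */
static int demo_ioreq_task(void *data)
{
	struct acrn_ioreq_client *client = data;
	unsigned long *ioreqs_map = client->ioreqs_map;
	struct acrn_io_request *req;
	unsigned int vcpu;

	while (!kthread_should_stop()) {
		wait_event_interruptible(client->wq,
					 !bitmap_empty(ioreqs_map,
						       client->vm->vcpu_num) ||
					 kthread_should_stop());

		vcpu = find_first_bit(ioreqs_map, client->vm->vcpu_num);
		while (vcpu < client->vm->vcpu_num) {
			req = client->vm->ioreq_buf->req_slot + vcpu;
			if (client->handler(client, req) < 0)
				break;
			clear_bit(vcpu, ioreqs_map);
			vcpu = find_first_bit(ioreqs_map,
					      client->vm->vcpu_num);
		}
	}
	return 0;
}
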
212 void acrn_ioreq_request_clear(struct acrn_vm *vm) in acrn_ioreq_request_clear() argument
220 * IO requests of this VM will be completed directly in in acrn_ioreq_request_clear()
223 set_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags); in acrn_ioreq_request_clear()
226 * acrn_ioreq_request_clear is only called in VM reset case. Simply in acrn_ioreq_request_clear()
230 spin_lock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_request_clear()
231 list_for_each_entry(client, &vm->ioreq_clients, list) { in acrn_ioreq_request_clear()
236 spin_unlock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_request_clear()
246 spin_lock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_request_clear()
247 client = vm->default_client; in acrn_ioreq_request_clear()
257 spin_unlock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_request_clear()
260 clear_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags); in acrn_ioreq_request_clear()
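
acrn_ioreq_request_clear() coordinates with the dispatcher through ACRN_VM_FLAG_CLEARING_IOREQ: while the flag is set (line 223), newly arriving requests are completed directly by acrn_ioreq_dispatch() (see lines 515-516 below), and the function waits for every registered client plus the default client to drain before dropping the flag at line 260. A plausible shape for that drain loop, with demo_has_pending() as a hypothetical helper since the pending test is not visible in these matches:

/* Hypothetical helper: whether a client still owns in-flight requests. */
static bool demo_has_pending(struct acrn_ioreq_client *client);

static void demo_drain_clients(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client;
	bool busy;

	set_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);

	do {
		busy = false;
		spin_lock_bh(&vm->ioreq_clients_lock);
		list_for_each_entry(client, &vm->ioreq_clients, list)
			busy |= demo_has_pending(client);
		if (vm->default_client)
			busy |= demo_has_pending(vm->default_client);
		spin_unlock_bh(&vm->ioreq_clients_lock);

		if (busy)	/* never sleep under the spinlock */
			schedule_timeout_interruptible(HZ / 100);
	} while (busy);

	clear_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
}
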
317 static bool handle_cf8cfc(struct acrn_vm *vm, in handle_cf8cfc() argument
326 vm->pci_conf_addr = req->reqs.pio_request.value; in handle_cf8cfc()
328 req->reqs.pio_request.value = vm->pci_conf_addr; in handle_cf8cfc()
331 if (!(vm->pci_conf_addr & CONF1_ENABLE)) { in handle_cf8cfc()
340 pci_cfg_addr = vm->pci_conf_addr; in handle_cf8cfc()
354 ioreq_complete_request(vm, vcpu, req); in handle_cf8cfc()
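
handle_cf8cfc() emulates the PCI configuration address/data port pair (0xCF8/0xCFC): a write to the address port is latched into vm->pci_conf_addr, a read returns it, and CONF1_ENABLE (bit 31) decides whether a data-port access really targets configuration space; requests handled here are completed in place via ioreq_complete_request() at line 354. The bit layout being decoded is the standard PCI "configuration mechanism #1" encoding, shown below as a self-contained example (the field positions come from the PCI spec, not from the matches above):

#include <stdint.h>
#include <stdio.h>

#define CONF1_ENABLE	0x80000000U	/* bit 31: config access enable */

static void decode_conf1(uint32_t addr)
{
	if (!(addr & CONF1_ENABLE)) {
		puts("not a config-space access");	/* cf. the check at line 331 */
		return;
	}
	printf("bus %u dev %u fn %u reg 0x%x\n",
	       (addr >> 16) & 0xff,	/* bits 23:16 - bus number      */
	       (addr >> 11) & 0x1f,	/* bits 15:11 - device number   */
	       (addr >>  8) & 0x07,	/* bits 10:8  - function number */
	       addr & 0xfc);		/* bits 7:2   - dword-aligned register */
}

int main(void)
{
	/* Example: bus 0, device 3, function 0, register 0x10 (BAR0). */
	decode_conf1(CONF1_ENABLE | (0u << 16) | (3u << 11) | (0u << 8) | 0x10);
	return 0;
}
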
386 static struct acrn_ioreq_client *find_ioreq_client(struct acrn_vm *vm, in find_ioreq_client() argument
392 lockdep_assert_held(&vm->ioreq_clients_lock); in find_ioreq_client()
394 list_for_each_entry(client, &vm->ioreq_clients, list) { in find_ioreq_client()
406 return found ? found : vm->default_client; in find_ioreq_client()
411 * @vm: The VM that this client belongs to
420 struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm, in acrn_ioreq_client_create() argument
437 client->vm = vm; in acrn_ioreq_client_create()
447 client->thread = kthread_run(ioreq_task, client, "VM%u-%s", in acrn_ioreq_client_create()
448 client->vm->vmid, client->name); in acrn_ioreq_client_create()
455 spin_lock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_client_create()
457 vm->default_client = client; in acrn_ioreq_client_create()
459 list_add(&client->list, &vm->ioreq_clients); in acrn_ioreq_client_create()
460 spin_unlock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_client_create()
473 struct acrn_vm *vm = client->vm; in acrn_ioreq_client_destroy() local
484 spin_lock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_client_destroy()
486 vm->default_client = NULL; in acrn_ioreq_client_destroy()
489 spin_unlock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_client_destroy()
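
Lines 420-489 show the client lifecycle: acrn_ioreq_client_create() records the owning VM, starts a worker thread named "VM%u-%s", and either installs the client as vm->default_client or adds it to vm->ioreq_clients under ioreq_clients_lock; acrn_ioreq_client_destroy() undoes this. A hedged usage sketch follows; the parameter list beyond the VM pointer and the name, and the handler prototype, are assumptions, since the matches only show the first argument of the create call:

/*
 * Hypothetical in-kernel user registering an I/O request client for an
 * emulated device.  Only acrn_ioreq_client_create()/_destroy() and the
 * "VM%u-%s" thread naming appear in the listing; everything else is assumed.
 */
static int demo_pio_handler(struct acrn_ioreq_client *client,
			    struct acrn_io_request *req)
{
	/* Decode req (e.g. req->reqs.pio_request) and emulate the access. */
	return 0;
}

static struct acrn_ioreq_client *demo_client;

static int demo_attach(struct acrn_vm *vm)
{
	demo_client = acrn_ioreq_client_create(vm, demo_pio_handler,
					       NULL /* priv */,
					       false /* not the default client */,
					       "demo-dev");
	return demo_client ? 0 : -ENOMEM;
}

static void demo_detach(void)
{
	if (demo_client)
		acrn_ioreq_client_destroy(demo_client);
}

The thread started at lines 447-448 would then be what eventually invokes the handler for requests routed to this client, per the ioreq_task() matches above.
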
502 static int acrn_ioreq_dispatch(struct acrn_vm *vm) in acrn_ioreq_dispatch() argument
508 for (i = 0; i < vm->vcpu_num; i++) { in acrn_ioreq_dispatch()
509 req = vm->ioreq_buf->req_slot + i; in acrn_ioreq_dispatch()
515 if (test_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags)) { in acrn_ioreq_dispatch()
516 ioreq_complete_request(vm, i, req); in acrn_ioreq_dispatch()
519 if (handle_cf8cfc(vm, req, i)) in acrn_ioreq_dispatch()
522 spin_lock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_dispatch()
523 client = find_ioreq_client(vm, req); in acrn_ioreq_dispatch()
527 spin_unlock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_dispatch()
542 spin_unlock_bh(&vm->ioreq_clients_lock); in acrn_ioreq_dispatch()
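
acrn_ioreq_dispatch() makes one pass over the per-vCPU slots of the shared buffer: requests arriving while ACRN_VM_FLAG_CLEARING_IOREQ is set are completed immediately, 0xCF8/0xCFC accesses are filtered out by handle_cf8cfc(), and everything else is routed to a client found under ioreq_clients_lock. A condensed sketch of that pass; the pending-state test and the client wake-up are hypothetical stand-ins for logic that a search for vm does not show:

/* Hypothetical helpers for state that the matches above do not expose. */
static bool demo_slot_is_pending(struct acrn_io_request *req);
static void demo_client_wake(struct acrn_ioreq_client *client, u16 vcpu);

static void demo_dispatch_pass(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client;
	struct acrn_io_request *req;
	u16 i;

	for (i = 0; i < vm->vcpu_num; i++) {
		req = vm->ioreq_buf->req_slot + i;
		if (!demo_slot_is_pending(req))
			continue;

		/* VM reset in progress: complete directly (lines 515-516). */
		if (test_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags)) {
			ioreq_complete_request(vm, i, req);
			continue;
		}

		/* PCI config address/data ports are emulated in place. */
		if (handle_cf8cfc(vm, req, i))
			continue;

		/* Route the request to a registered client, or the default one. */
		spin_lock_bh(&vm->ioreq_clients_lock);
		client = find_ioreq_client(vm, req);
		if (client)
			demo_client_wake(client, i);
		spin_unlock_bh(&vm->ioreq_clients_lock);
	}
}
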
551 struct acrn_vm *vm; in ioreq_dispatcher() local
554 list_for_each_entry(vm, &acrn_vm_list, list) { in ioreq_dispatcher()
555 if (!vm->ioreq_buf) in ioreq_dispatcher()
557 acrn_ioreq_dispatch(vm); in ioreq_dispatcher()
601 int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma) in acrn_ioreq_init() argument
607 if (vm->ioreq_buf) in acrn_ioreq_init()
622 vm->ioreq_buf = page_address(page); in acrn_ioreq_init()
623 vm->ioreq_page = page; in acrn_ioreq_init()
625 ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(set_buffer)); in acrn_ioreq_init()
629 vm->ioreq_buf = NULL; in acrn_ioreq_init()
634 "Init ioreq buffer %pK!\n", vm->ioreq_buf); in acrn_ioreq_init()
641 void acrn_ioreq_deinit(struct acrn_vm *vm) in acrn_ioreq_deinit() argument
646 "Deinit ioreq buffer %pK!\n", vm->ioreq_buf); in acrn_ioreq_deinit()
647 /* Destroy all clients belonging to this VM */ in acrn_ioreq_deinit()
648 list_for_each_entry_safe(client, next, &vm->ioreq_clients, list) in acrn_ioreq_deinit()
650 if (vm->default_client) in acrn_ioreq_deinit()
651 acrn_ioreq_client_destroy(vm->default_client); in acrn_ioreq_deinit()
653 if (vm->ioreq_buf && vm->ioreq_page) { in acrn_ioreq_deinit()
654 unpin_user_page(vm->ioreq_page); in acrn_ioreq_deinit()
655 vm->ioreq_buf = NULL; in acrn_ioreq_deinit()